[clang] [RISCV][clang] Support Zvfbfa C intrinsics (PR #164094)
Brandon Wu via cfe-commits
cfe-commits at lists.llvm.org
Sat Oct 18 07:44:34 PDT 2025
4vtomat (https://github.com/4vtomat) created https://github.com/llvm/llvm-project/pull/164094
Co-authored-by: Craig Topper <craig.topper at sifive.com>
From da52b9a5d2729572beecd02b53b9ea9a8eda1ccc Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Sat, 18 Oct 2025 07:37:14 -0700
Subject: [PATCH] [RISCV][clang] Support Zvfbfa C intrinsics
Co-authored-by: Craig Topper <craig.topper at sifive.com>
---
clang/include/clang/Basic/riscv_vector.td | 83 +-
.../clang/Basic/riscv_vector_common.td | 68 +-
clang/lib/Sema/SemaRISCV.cpp | 3 +-
clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5 +
.../zvfbfa/non-policy/non-overloaded/vfadd.c | 249 ++
.../non-policy/non-overloaded/vfclass.c | 134 ++
.../zvfbfa/non-policy/non-overloaded/vfmacc.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmadd.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmax.c | 249 ++
.../non-policy/non-overloaded/vfmerge.c | 69 +
.../zvfbfa/non-policy/non-overloaded/vfmin.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmsac.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmsub.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmul.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfmv.c | 189 ++
.../zvfbfa/non-policy/non-overloaded/vfncvt.c | 724 ++++++
.../non-policy/non-overloaded/vfncvt_rod.c | 113 +
.../non-policy/non-overloaded/vfncvt_rtz.c | 267 +++
.../non-policy/non-overloaded/vfnmacc.c | 249 ++
.../non-policy/non-overloaded/vfnmadd.c | 249 ++
.../non-policy/non-overloaded/vfnmsac.c | 249 ++
.../non-policy/non-overloaded/vfnmsub.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfrec7.c | 129 ++
.../non-policy/non-overloaded/vfrsqrt7.c | 129 ++
.../zvfbfa/non-policy/non-overloaded/vfrsub.c | 129 ++
.../zvfbfa/non-policy/non-overloaded/vfsgnj.c | 249 ++
.../non-policy/non-overloaded/vfsgnjn.c | 249 ++
.../non-policy/non-overloaded/vfsgnjx.c | 249 ++
.../non-policy/non-overloaded/vfslide1down.c | 129 ++
.../non-policy/non-overloaded/vfslide1up.c | 129 ++
.../zvfbfa/non-policy/non-overloaded/vfsub.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vfwadd.c | 899 ++++++++
.../zvfbfa/non-policy/non-overloaded/vfwcvt.c | 366 +++
.../non-policy/non-overloaded/vfwmacc.c | 486 ++++
.../non-policy/non-overloaded/vfwmsac.c | 486 ++++
.../zvfbfa/non-policy/non-overloaded/vfwmul.c | 455 ++++
.../non-policy/non-overloaded/vfwnmacc.c | 494 ++++
.../non-policy/non-overloaded/vfwnmsac.c | 494 ++++
.../zvfbfa/non-policy/non-overloaded/vfwsub.c | 899 ++++++++
.../zvfbfa/non-policy/non-overloaded/vmfeq.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vmfge.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vmfgt.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vmfle.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vmflt.c | 249 ++
.../zvfbfa/non-policy/non-overloaded/vmfne.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfadd.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfclass.c | 134 ++
.../zvfbfa/non-policy/overloaded/vfmacc.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmadd.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmax.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmerge.c | 69 +
.../zvfbfa/non-policy/overloaded/vfmin.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmsac.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmsub.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmul.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfmv.c | 69 +
.../zvfbfa/non-policy/overloaded/vfncvt.c | 724 ++++++
.../zvfbfa/non-policy/overloaded/vfncvt_rod.c | 113 +
.../zvfbfa/non-policy/overloaded/vfncvt_rtz.c | 267 +++
.../zvfbfa/non-policy/overloaded/vfnmacc.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfnmadd.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfnmsac.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfnmsub.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfrec7.c | 129 ++
.../zvfbfa/non-policy/overloaded/vfrsqrt7.c | 129 ++
.../zvfbfa/non-policy/overloaded/vfrsub.c | 129 ++
.../zvfbfa/non-policy/overloaded/vfsgnj.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfsgnjn.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfsgnjx.c | 249 ++
.../non-policy/overloaded/vfslide1down.c | 129 ++
.../zvfbfa/non-policy/overloaded/vfslide1up.c | 129 ++
.../zvfbfa/non-policy/overloaded/vfsub.c | 249 ++
.../zvfbfa/non-policy/overloaded/vfwadd.c | 893 ++++++++
.../zvfbfa/non-policy/overloaded/vfwcvt.c | 366 +++
.../zvfbfa/non-policy/overloaded/vfwmacc.c | 474 ++++
.../zvfbfa/non-policy/overloaded/vfwmsac.c | 474 ++++
.../zvfbfa/non-policy/overloaded/vfwmul.c | 451 ++++
.../zvfbfa/non-policy/overloaded/vfwnmacc.c | 480 ++++
.../zvfbfa/non-policy/overloaded/vfwnmsac.c | 480 ++++
.../zvfbfa/non-policy/overloaded/vfwsub.c | 893 ++++++++
.../zvfbfa/non-policy/overloaded/vmfeq.c | 249 ++
.../zvfbfa/non-policy/overloaded/vmfge.c | 249 ++
.../zvfbfa/non-policy/overloaded/vmfgt.c | 249 ++
.../zvfbfa/non-policy/overloaded/vmfle.c | 249 ++
.../zvfbfa/non-policy/overloaded/vmflt.c | 249 ++
.../zvfbfa/non-policy/overloaded/vmfne.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfadd.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfclass.c | 272 +++
.../zvfbfa/policy/non-overloaded/vfmacc.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmadd.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmax.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmerge.c | 69 +
.../zvfbfa/policy/non-overloaded/vfmin.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmsac.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmsub.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmul.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfmv.c | 129 ++
.../zvfbfa/policy/non-overloaded/vfncvt.c | 1577 +++++++++++++
.../zvfbfa/policy/non-overloaded/vfncvt_rod.c | 233 ++
.../zvfbfa/policy/non-overloaded/vfncvt_rtz.c | 572 +++++
.../zvfbfa/policy/non-overloaded/vfnmacc.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfnmadd.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfnmsac.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfnmsub.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfrec7.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfrsqrt7.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfrsub.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfsgnj.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfsgnjn.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfsgnjx.c | 489 ++++
.../policy/non-overloaded/vfslide1down.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfslide1up.c | 249 ++
.../zvfbfa/policy/non-overloaded/vfsub.c | 489 ++++
.../zvfbfa/policy/non-overloaded/vfwadd.c | 2007 +++++++++++++++++
.../zvfbfa/policy/non-overloaded/vfwcvt.c | 765 +++++++
.../zvfbfa/policy/non-overloaded/vfwmacc.c | 1017 +++++++++
.../zvfbfa/policy/non-overloaded/vfwmsac.c | 1017 +++++++++
.../zvfbfa/policy/non-overloaded/vfwmul.c | 1015 +++++++++
.../zvfbfa/policy/non-overloaded/vfwnmacc.c | 1034 +++++++++
.../zvfbfa/policy/non-overloaded/vfwnmsac.c | 1034 +++++++++
.../zvfbfa/policy/non-overloaded/vfwsub.c | 2007 +++++++++++++++++
.../zvfbfa/policy/non-overloaded/vmfeq.c | 129 ++
.../zvfbfa/policy/non-overloaded/vmfge.c | 129 ++
.../zvfbfa/policy/non-overloaded/vmfgt.c | 129 ++
.../zvfbfa/policy/non-overloaded/vmfle.c | 129 ++
.../zvfbfa/policy/non-overloaded/vmflt.c | 129 ++
.../zvfbfa/policy/non-overloaded/vmfne.c | 129 ++
.../zvfbfa/policy/overloaded/vfadd.c | 489 ++++
.../zvfbfa/policy/overloaded/vfclass.c | 272 +++
.../zvfbfa/policy/overloaded/vfmacc.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmadd.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmax.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmerge.c | 69 +
.../zvfbfa/policy/overloaded/vfmin.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmsac.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmsub.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmul.c | 489 ++++
.../zvfbfa/policy/overloaded/vfmv.c | 129 ++
.../zvfbfa/policy/overloaded/vfncvt.c | 1539 +++++++++++++
.../zvfbfa/policy/overloaded/vfncvt_rod.c | 233 ++
.../zvfbfa/policy/overloaded/vfncvt_rtz.c | 572 +++++
.../zvfbfa/policy/overloaded/vfnmacc.c | 489 ++++
.../zvfbfa/policy/overloaded/vfnmadd.c | 489 ++++
.../zvfbfa/policy/overloaded/vfnmsac.c | 489 ++++
.../zvfbfa/policy/overloaded/vfnmsub.c | 489 ++++
.../zvfbfa/policy/overloaded/vfrec7.c | 249 ++
.../zvfbfa/policy/overloaded/vfrsqrt7.c | 249 ++
.../zvfbfa/policy/overloaded/vfrsub.c | 249 ++
.../zvfbfa/policy/overloaded/vfsgnj.c | 489 ++++
.../zvfbfa/policy/overloaded/vfsgnjn.c | 489 ++++
.../zvfbfa/policy/overloaded/vfsgnjx.c | 489 ++++
.../zvfbfa/policy/overloaded/vfslide1down.c | 249 ++
.../zvfbfa/policy/overloaded/vfslide1up.c | 249 ++
.../zvfbfa/policy/overloaded/vfsub.c | 489 ++++
.../zvfbfa/policy/overloaded/vfwadd.c | 1932 ++++++++++++++++
.../zvfbfa/policy/overloaded/vfwcvt.c | 765 +++++++
.../zvfbfa/policy/overloaded/vfwmacc.c | 977 ++++++++
.../zvfbfa/policy/overloaded/vfwmsac.c | 977 ++++++++
.../zvfbfa/policy/overloaded/vfwmul.c | 975 ++++++++
.../zvfbfa/policy/overloaded/vfwnmacc.c | 994 ++++++++
.../zvfbfa/policy/overloaded/vfwnmsac.c | 994 ++++++++
.../zvfbfa/policy/overloaded/vfwsub.c | 1932 ++++++++++++++++
.../zvfbfa/policy/overloaded/vmfeq.c | 129 ++
.../zvfbfa/policy/overloaded/vmfge.c | 129 ++
.../zvfbfa/policy/overloaded/vmfgt.c | 129 ++
.../zvfbfa/policy/overloaded/vmfle.c | 129 ++
.../zvfbfa/policy/overloaded/vmflt.c | 129 ++
.../zvfbfa/policy/overloaded/vmfne.c | 129 ++
168 files changed, 69141 insertions(+), 20 deletions(-)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
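Before the per-file diffs, a quick orientation: the new C intrinsics reuse the existing RVV naming scheme, only with bf16 element types. A minimal usage sketch, mirroring the autogenerated vfadd tests added below (intrinsic and type names are taken from those tests; the exact driver feature flags are an assumption):

  #include <riscv_vector.h>

  // Assumes a toolchain built with -target-feature +v and
  // -target-feature +experimental-zvfbfa enabled.
  vbfloat16m1_t add_then_scale(vbfloat16m1_t a, vbfloat16m1_t b,
                               __bf16 s, size_t vl) {
    vbfloat16m1_t t = __riscv_vfadd_vv_bf16m1(a, b, vl); // vector + vector
    return __riscv_vfadd_vf_bf16m1(t, s, vl);            // vector + scalar
  }
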
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 07a8724b6f33d..96d8300a0faf3 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1013,9 +1013,9 @@ let ManualCodegen = [{
}] in {
let HasFRMRoundModeOp = true in {
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
- defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1023,14 +1023,14 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
}
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
- defm vfadd : RVVFloatingBinBuiltinSet;
- defm vfsub : RVVFloatingBinBuiltinSet;
- defm vfrsub : RVVFloatingBinVFBuiltinSet;
+ defm vfadd : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfsub : RVVFloatingBinBuiltinSet<HasBF=1>;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1038,7 +1038,7 @@ let ManualCodegen = [{
defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
// 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
- defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfmul : RVVFloatingBinBuiltinSet<HasBF=1>;
defm vfdiv : RVVFloatingBinBuiltinSet;
defm vfrdiv : RVVFloatingBinVFBuiltinSet;
}
@@ -1065,6 +1065,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
}
// 13.3. Vector Widening Floating-Point Add/Subtract Instructions
@@ -1081,6 +1085,10 @@ let ManualCodegen = [{
defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
}
}
@@ -1170,6 +1178,8 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>;
}
// 13.8. Vector Floating-Point Square-Root Instruction
defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>;
@@ -1180,21 +1190,26 @@ let ManualCodegen = [{
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>;
}
// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>;
+
// 13.11. Vector Floating-Point MIN/MAX Instructions
-defm vfmin : RVVFloatingBinBuiltinSet;
-defm vfmax : RVVFloatingBinBuiltinSet;
+defm vfmin : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfmax : RVVFloatingBinBuiltinSet<HasBF=1>;
// 13.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj : RVVFloatingBinBuiltinSet;
-defm vfsgnjn : RVVFloatingBinBuiltinSet;
-defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfsgnj : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjn : RVVFloatingBinBuiltinSet<HasBF=1>;
+defm vfsgnjx : RVVFloatingBinBuiltinSet<HasBF=1>;
}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
let RequiredFeatures = ["zvfh"] in
@@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>;
let RequiredFeatures = ["zvfh"] in
defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+ defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>;
}
// 13.15. Vector Floating-Point Merge Instruction
@@ -1239,6 +1256,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x",
[["vfm", "v", "vvem"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y",
+ [["vfm", "v", "vvem"]]>;
}
// 13.16. Vector Floating-Point Move Instruction
@@ -1252,6 +1272,9 @@ let HasMasked = false,
let RequiredFeatures = ["zvfh"] in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x",
[["f", "v", "ve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y",
+ [["f", "v", "ve"]]>;
}
// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>;
}
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in {
+ defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>;
+ defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>;
+ }
let OverloadedName = "vfwcvt_f" in {
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>;
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>;
}
}
@@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_rtz_x" in {
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_rod_f" in {
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert BF16 to FP32
@@ -1363,11 +1398,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>;
@@ -1382,6 +1421,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1430,11 +1471,15 @@ let ManualCodegen = [{
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>;
}
let OverloadedName = "vfncvt_xu" in {
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
let RequiredFeatures = ["zvfh"] in
defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>;
}
let OverloadedName = "vfncvt_f" in {
defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>;
@@ -1449,6 +1494,8 @@ let ManualCodegen = [{
let RequiredFeatures = ["zvfhmin"] in
defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
}
+ let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>;
}
// Zvfbfmin - Vector convert FP32 to BF16
@@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
let RequiredFeatures = ["zvfh"] in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x",
[["s", "ve", "ev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y",
+ [["s", "ve", "ev"]]>;
}
let OverloadedName = "vfmv_s",
UnMaskedPolicyScheme = HasPassthruOperand,
@@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x",
[["f", "v", "ve"],
["x", "Uv", "UvUe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y",
+ [["f", "v", "ve"]]>;
}
}
@@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet;
// 16.3.3. Vector Slide1up Instructions
let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
-defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
-defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet<HasBF=1>;
// 16.4. Vector Register Gather Instructions
// signed and floating type
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 767bcee7b1596..eaa2ba43885b0 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -83,6 +83,8 @@
// elements of the same width
// F: given a vector type, compute the vector type with floating-point type
// elements of the same width
+// Y: given a vector type, compute the vector type with bfloat16 type elements
+// of the same width
// S: given a vector type, computes its equivalent one for LMUL=1. This is a
// no-op if the vector was already LMUL=1
// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
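Reading the new "Y" transformer together with the existing "w" (widening) letter: starting from base type "c" (int8_t) at LMUL=1, "w" doubles the element width to 16 bits and "Y" then selects bfloat16 elements, so "Yw" denotes vbfloat16m2_t. A hedged, type-level sketch of what that gives the vfncvt_rtz_xu entry later in this patch (the non-overloaded name below is hypothetical; the "YwUv" suffix descriptor suggests both types appear in it):

  // [["YwUv", "UvYw"]] with base "c": returns vuint8m1_t, takes vbfloat16m2_t
  vuint8m1_t __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t src,
                                                   size_t vl); // sketch
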
@@ -470,6 +472,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvv"],
["vf", "v", "vvev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
}
multiclass RVVFloatingTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
@@ -479,6 +485,10 @@ let HasMaskedOffOperand = false in {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvvu"],
["vf", "v", "vvevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
}
}
@@ -491,6 +501,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvv"],
["vf", "w", "wwev"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvv"],
+ ["vf", "vw", "wwev"]]>;
}
multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "f",
@@ -500,10 +514,14 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
defm "" : RVVOutOp1Op2BuiltinSet<NAME, "x",
[["vv", "w", "wwvvu"],
["vf", "w", "wwevu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "y",
+ [["vv", "vw", "wwvvu"],
+ ["vf", "vw", "wwevu"]]>;
}
}
-multiclass RVVFloatingBinBuiltinSet {
+multiclass RVVFloatingBinBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
@@ -511,9 +529,15 @@ multiclass RVVFloatingBinBuiltinSet {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvv"],
["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinBuiltinSetRoundingMode {
+multiclass RVVFloatingBinBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
@@ -521,22 +545,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode {
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vv", "v", "vvvu"],
["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSet {
+multiclass RVVFloatingBinVFBuiltinSet<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vve"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vve"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vve"]]>;
+ }
}
-multiclass RVVFloatingBinVFBuiltinSetRoundingMode {
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode<bit HasBF = 0> {
defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
[["vf", "v", "vveu"]]>;
let RequiredFeatures = ["zvfh"] in
defm "" : RVVOutOp1BuiltinSet<NAME, "x",
[["vf", "v", "vveu"]]>;
+ if HasBF then {
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOutOp1BuiltinSet<NAME, "y",
+ [["vf", "v", "vveu"]]>;
+ }
}
multiclass RVVFloatingMaskOutBuiltinSet {
@@ -547,6 +587,10 @@ multiclass RVVFloatingMaskOutBuiltinSet {
defm "" : RVVOp0Op1BuiltinSet<NAME, "x",
[["vv", "vm", "mvv"],
["vf", "vm", "mve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVOp0Op1BuiltinSet<NAME, "y",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
}
multiclass RVVFloatingMaskOutVFBuiltinSet
@@ -748,6 +792,10 @@ multiclass RVVFloatingWidenBinBuiltinSet {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvv"],
["vf", "w", "wve"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvv"],
+ ["vf", "vw", "wve"]]>;
}
multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
@@ -758,6 +806,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
defm "" : RVVWidenBuiltinSet<NAME, "x",
[["vv", "w", "wvvu"],
["vf", "w", "wveu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenBuiltinSet<NAME, "y",
+ [["vv", "vw", "wvvu"],
+ ["vf", "vw", "wveu"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSet {
@@ -768,6 +820,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwv"],
["wf", "w", "wwe"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwv"],
+ ["wf", "ew", "wwe"]]>;
}
multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
@@ -778,4 +834,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "x",
[["wv", "w", "wwvu"],
["wf", "w", "wweu"]]>;
+ let RequiredFeatures = ["zvfbfa"] in
+ defm "" : RVVWidenWOp0BuiltinSet<NAME # "_w", "y",
+ [["wv", "vw", "wwvu"],
+ ["wf", "ew", "wweu"]]>;
}
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff98898b..c5ef0d535628d 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
}
else if (Info.ElementType->isBFloat16Type() &&
!FeatureMap.lookup("zvfbfmin") &&
- !FeatureMap.lookup("xandesvbfhcvt"))
+ !FeatureMap.lookup("xandesvbfhcvt") &&
+ !FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfbfmin or xandesvbfhcvt";
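The net effect of this Sema change is that a bf16 vector type no longer diagnoses when only the experimental Zvfbfa feature is enabled; previously it required zvfbfmin or xandesvbfhcvt. A minimal sketch of what now type-checks under the new feature set:

  #include <riscv_vector.h>

  // Accepted with -target-feature +v -target-feature +experimental-zvfbfa,
  // even without zvfbfmin.
  void consume(vbfloat16m1_t v);
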
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805d4a9d1..dad3d0dae423a 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'F':
TM |= TypeModifier::Float;
break;
+ case 'Y':
+ TM |= TypeModifier::BFloat;
+ break;
case 'S':
TM |= TypeModifier::LMUL1;
break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
ElementBitwidth *= 2;
LMUL.MulLog2LMUL(1);
Scale = LMUL.getScale(ElementBitwidth);
+ if (ScalarType == ScalarTypeKind::BFloat)
+ ScalarType = ScalarTypeKind::Float;
break;
case VectorTypeModifier::Widening4XVector:
ElementBitwidth *= 4;
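The applyModifier fixup above makes the widening transformer promote bfloat elements to float, which the "vw"/"Yw" prototypes in the TableGen changes rely on: doubling a 16-bit bfloat element must yield a 32-bit float element, since there is no 32-bit bfloat type. A type-level sketch, assuming LMUL composes as it does for the other floating-point types:

  // "w" applied to vbfloat16m1_t -> vfloat32m2_t
  // (ElementBitwidth 16 -> 32, LMUL m1 -> m2, ScalarType BFloat -> Float)
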
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000000000..d7734e05a8aaa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
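For context, the non-policy intrinsics exercised above are plain element-wise ops taking an explicit vl, so they compose with the usual strip-mining pattern. A minimal sketch, assuming the Zvfbfmin bf16 load/store intrinsics (__riscv_vle16_v_bf16m1 / __riscv_vse16_v_bf16m1) are available alongside this extension:

#include <riscv_vector.h>
#include <stddef.h>

// dst[i] = a[i] + b[i] over n bf16 elements, one LMUL=1 group per pass.
void bf16_add(__bf16 *dst, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
    vl = __riscv_vsetvl_e16m1(n);                      // elements this pass
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b, vl);
    __riscv_vse16_v_bf16m1(dst, __riscv_vfadd_vv_bf16m1(va, vb, vl), vl);
  }
}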
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000000000..68814f4672d05
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl);
+}
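The vfclass results above use the same class-bit encoding as the existing f16/f32 variants (per the V spec: bit 8 = signaling NaN, bit 9 = quiet NaN), so NaN detection falls out directly. A small sketch built on the standard integer mask intrinsics:

#include <riscv_vector.h>
#include <stddef.h>

// Mask of lanes holding a NaN: class bits 8 (sNaN) or 9 (qNaN) set.
static vbool16_t bf16_isnan(vbfloat16m1_t v, size_t vl) {
  vuint16m1_t cls = __riscv_vfclass_v_bf16m1_u16m1(v, vl);
  vuint16m1_t nan = __riscv_vand_vx_u16m1(cls, 0x300, vl);
  return __riscv_vmsne_vx_u16m1_b16(nan, 0, vl);
}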
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000000000..616455d5f3f9e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
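vfmacc computes vd = vs1 * vs2 + vd (vector-scalar form: vd = rs1 * vs2 + vd), which maps directly onto an axpy kernel. A sketch under the same Zvfbfmin load/store assumption as before:

#include <riscv_vector.h>
#include <stddef.h>

// y[i] += a * x[i] over n bf16 elements.
void bf16_axpy(__bf16 *y, __bf16 a, const __bf16 *x, size_t n) {
  for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
    vl = __riscv_vsetvl_e16m1(n);
    vbfloat16m1_t vy = __riscv_vle16_v_bf16m1(y, vl);
    vbfloat16m1_t vx = __riscv_vle16_v_bf16m1(x, vl);
    vy = __riscv_vfmacc_vf_bf16m1(vy, a, vx, vl);  // vy = a * vx + vy
    __riscv_vse16_v_bf16m1(y, vy, vl);
  }
}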
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000000000..eec662a3671c8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
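Note the operand roles relative to vfmacc above: vfmadd multiplies the destination operand instead of accumulating into it, i.e. vd = vs1 * vd + vs2 per the V spec. A one-line illustration on the m1 variant from this file:

#include <riscv_vector.h>
#include <stddef.h>

// vfmacc: vd = vs1 * vs2 + vd;  vfmadd: vd = vs1 * vd + vs2.
vbfloat16m1_t scale_then_add(vbfloat16m1_t acc, vbfloat16m1_t scale,
                             vbfloat16m1_t addend, size_t vl) {
  return __riscv_vfmadd_vv_bf16m1(acc, scale, addend, vl);
}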
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000000000..dfdeb4e967a46
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000000000..96221c5385dd9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000000000..8f8d82ba21bc5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000000000..f4644dfb8d7e7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000000000..07053afa3355c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
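vfmsub differs from vfmsac only in operand roles: vfmsub computes vd[i] * vs1[i] - vs2[i], keeping the accumulator as the multiplicand, while vfmsac computes vs1[i] * vs2[i] - vd[i]. A minimal sketch of the masked vector-scalar form (the mask is taken as a parameter purely for illustration):

#include <riscv_vector.h>
#include <stddef.h>

// Active lanes: vd[i] = vd[i] * rs1 - vs2[i]; inactive lanes are
// mask-agnostic in the non-policy _m form, matching the i64 3 policy
// operand in the IR above.
vbfloat16m1_t fmsub_scalar_masked(vbool16_t m, vbfloat16m1_t vd, __bf16 rs1,
                                  vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfmsub_vf_bf16m1_m(m, vd, rs1, vs2, vl);
}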
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000000000..88fb329934365
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
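For vfmul the non-policy forms pass `poison` as the merge operand (visible as the first argument in the check lines), so unmasked results are defined only up to vl and masked results only on active lanes. A sketch of a scalar-scaling loop, under the same assumptions as above (bf16 loads/stores available; names illustrative):

#include <riscv_vector.h>
#include <stddef.h>

// out[i] = in[i] * s over n bf16 elements at LMUL=1.
void scale_bf16(__bf16 *out, const __bf16 *in, __bf16 s, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t v = __riscv_vle16_v_bf16m1(in + i, vl);
    __riscv_vse16_v_bf16m1(out + i, __riscv_vfmul_vf_bf16m1(v, s, vl), vl);
    i += vl;
  }
}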
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000000000..d80ec3df1bbaa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c
@@ -0,0 +1,189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f_s_bf16mf4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f_s_bf16mf2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f_s_bf16m1_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f_s_bf16m2_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f_s_bf16m4_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f_s_bf16m8_bf16(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8(
+// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> poison, bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8(src, vl);
+}
+
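vfmv.v.f broadcasts the scalar to every lane up to vl, while vfmv.f.s reads element 0 regardless of vl, which is why the f.s tests above take no vl argument. A round-trip sketch (illustrative only):

#include <riscv_vector.h>
#include <stddef.h>

// Splat x into an LMUL=1 bf16 vector, then read element 0 back out.
__bf16 splat_roundtrip(__bf16 x, size_t vl) {
  vbfloat16m1_t v = __riscv_vfmv_v_f_bf16m1(x, vl);
  return __riscv_vfmv_f_s_bf16m1_bf16(v);
}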
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000000000..a5afab9bec1ec
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
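
[Editor's note on the checks above: this file covers every vfncvt shape the patch adds for bf16 — unmasked, masked (_m), explicit rounding mode (_rm), and masked with rounding mode (_rm_m); unmasked calls pass frm 7 (dynamic) in the IR, the _rm tests pin __RISCV_FRM_RNE (0). As a minimal usage sketch — not taken from the patch; the function, loop, and buffer names are illustrative, and it assumes the Zvfbfmin-style bf16 load/store intrinsics are available alongside these conversions:

#include <riscv_vector.h>
#include <stddef.h>

/* Strip-mined narrowing of a float32 buffer to bfloat16 using the new
   __riscv_vfncvt_f_f_w_bf16m1 intrinsic (rounds per the dynamic frm). */
void narrow_to_bf16(const float *src, __bf16 *dst, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m2(n - i);          /* lanes this pass */
    vfloat32m2_t v = __riscv_vle32_v_f32m2(src + i, vl);
    vbfloat16m1_t r = __riscv_vfncvt_f_f_w_bf16m1(v, vl); /* f32 -> bf16 */
    __riscv_vse16_v_bf16m1(dst + i, r, vl);
    i += vl;
  }
}

The _rm variants take the same arguments plus the rounding-mode constant before vl, exactly as the tests above show.]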
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..70c377bba1dfb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl);
+}
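
[Editor's note: vfncvt_rod always rounds toward odd, so unlike the _rm variants it never takes a frm operand; the masked form simply prepends the mask. A short sketch of the masked call shape, with illustrative names — the signature itself matches the tests above:

#include <riscv_vector.h>

/* Convert only the active lanes; inactive lanes follow the tail/mask-
   agnostic policy the non-policy intrinsic encodes (the trailing i64 3
   in the checked IR). */
vbfloat16m1_t narrow_rod_masked(vbool16_t mask, vfloat32m2_t v, size_t vl) {
  return __riscv_vfncvt_rod_f_f_w_bf16m1_m(mask, v, vl);
}]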
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..854e9868109e4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl);
+}
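
[Editor's note: the _rtz conversions truncate toward zero, so they likewise omit the frm operand. A minimal sketch, assuming the usual RVV narrowing-convert semantics for out-of-range inputs (names illustrative; the intrinsic signature is the one exercised above):

#include <riscv_vector.h>

/* Truncate bf16 lanes toward zero into signed bytes; out-of-range values
   clamp to the int8 range per the RVV float-to-integer conversion rules. */
vint8mf2_t bf16_to_i8_trunc(vbfloat16m1_t v, size_t vl) {
  return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(v, vl);
}]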
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..18484883a14f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
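The vv tests above exercise vd[i] = -(vs1[i] * vs2[i]) - vd[i]. A minimal
strip-mined sketch of how the unmasked m1 form might be used (loop structure
and names are illustrative; the vsetvl/vle16/vse16 calls are standard RVV
bf16 intrinsics assumed available, not added by this patch):

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: acc[i] = -(a[i] * b[i]) - acc[i] over n bf16 elements.
void bf16_nmacc(__bf16 *acc, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, acc += vl, a += vl, b += vl) {
    vl = __riscv_vsetvl_e16m1(n);                       // elements this pass
    vbfloat16m1_t vd  = __riscv_vle16_v_bf16m1(acc, vl);
    vbfloat16m1_t vs1 = __riscv_vle16_v_bf16m1(a, vl);
    vbfloat16m1_t vs2 = __riscv_vle16_v_bf16m1(b, vl);
    vd = __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);   // negated multiply-accumulate
    __riscv_vse16_v_bf16m1(acc, vd, vl);
  }
}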
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000000000..e519e5acb4575
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
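vfnmadd differs from vfnmacc only in which operands feed the multiply:
vfnmacc computes -(vs1 * vs2) - vd, while vfnmadd computes -(vd * vs1) - vs2,
i.e. the accumulator itself is multiplied and vs2 is subtracted. A small
illustrative contrast (the function name is hypothetical):

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: choose between the two negated-FMA forms tested above.
// vfnmacc: vd = -(vs1 * vs2) - vd  (multiplies the two sources)
// vfnmadd: vd = -(vd  * vs1) - vs2 (multiplies accumulator and vs1)
vbfloat16m1_t negated_fma(int multiply_accumulator, vbfloat16m1_t vd,
                          vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
  return multiply_accumulator
             ? __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl)
             : __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl);
}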
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000000000..47e1f44f5a45f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
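vfnmsac negates only the product: vd[i] = -(vs1[i] * vs2[i]) + vd[i], so the
vf form amounts to subtracting a scaled vector from an accumulator. A minimal
sketch using the scalar-operand intrinsic tested above (names are
illustrative; the vsetvl/vle16/vse16 calls are standard RVV bf16 intrinsics
assumed available, not part of this patch):

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: acc[i] = acc[i] - s * b[i] over n bf16 elements.
void bf16_scaled_sub(__bf16 *acc, __bf16 s, const __bf16 *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, acc += vl, b += vl) {
    vl = __riscv_vsetvl_e16m1(n);
    vbfloat16m1_t vd  = __riscv_vle16_v_bf16m1(acc, vl);
    vbfloat16m1_t vs2 = __riscv_vle16_v_bf16m1(b, vl);
    vd = __riscv_vfnmsac_vf_bf16m1(vd, s, vs2, vl);  // vd = -(s * vs2) + vd
    __riscv_vse16_v_bf16m1(acc, vd, vl);
  }
}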
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000000000..4b55b64542c61
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
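For context (not part of the patch): vfnmsub differs from vfnmsac only in which role the destination plays — here it is the multiplicand, vd[i] = -(vd[i] * vs1[i]) + vs2[i]. A minimal sketch under the same assumptions as above (illustrative helper name, experimental Zvfbfa intrinsics available):

#include <riscv_vector.h>

// acc[i] = -(acc[i] * a[i]) + addend[i] for the first vl elements.
static vbfloat16m1_t neg_mul_sub(vbfloat16m1_t acc, vbfloat16m1_t a,
                                 vbfloat16m1_t addend, size_t vl) {
  return __riscv_vfnmsub_vv_bf16m1(acc, a, addend, vl);
}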
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000000000..1ffee73e91d04
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl);
+}
+
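For context (not part of the patch): vfrec7 produces a reciprocal estimate accurate to roughly 7 bits, and the generated IR above carries a rounding-mode operand (the i64 7 is frm "dynamic"). A minimal sketch of the masked form, same assumptions as the sketches above:

#include <riscv_vector.h>

// Approximate 1/x (about 7 bits of accuracy) for the active elements.
static vbfloat16m1_t recip_estimate(vbool16_t m, vbfloat16m1_t x, size_t vl) {
  return __riscv_vfrec7_v_bf16m1_m(m, x, vl);
}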
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..964c4869622aa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl);
+}
+
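For context (not part of the patch): vfrsqrt7 is the companion estimate instruction, giving roughly 7 bits of 1/sqrt(x); note in the checks above that, unlike vfrec7, the vfrsqrt7 intrinsic carries no rounding-mode operand. A minimal sketch, same assumptions:

#include <riscv_vector.h>

// Approximate 1/sqrt(x) (about 7 bits of accuracy) across vl elements.
static vbfloat16m1_t rsqrt_estimate(vbfloat16m1_t x, size_t vl) {
  return __riscv_vfrsqrt7_v_bf16m1(x, vl);
}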
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000000000..c7c3869e7b77c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
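For context (not part of the patch): vfrsub reverses the usual operand order, vd[i] = rs1 - vs2[i], which is presumably why only the vector-scalar (.vf) form is tested — a vector-vector reverse subtract would just be vfsub with the operands swapped. A minimal sketch, same assumptions as above:

#include <riscv_vector.h>

// x[i] = 1.0 - x[i], a common use of reverse subtraction.
static vbfloat16m1_t one_minus(vbfloat16m1_t x, size_t vl) {
  return __riscv_vfrsub_vf_bf16m1(x, (__bf16)1.0f, vl);
}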
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000000000..778b8b83e9841
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000000000..7de308978e1d3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..5fa285cc78b63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..b94d26b4ddf40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..06e8b49af19d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl);
+}
+
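[For readers skimming the archive: the masked vfslide1up tests above close out the bf16 slide-up coverage. As a minimal usage sketch, not part of the patch itself (the helper name shift_in_scalar is illustrative), the unmasked form inserts a scalar at element 0 and shifts the first vl elements of the source up by one:

#include <riscv_vector.h>

// Illustrative helper, assuming the zvfbfa extension is enabled:
// result[0] == head, and result[i] == src[i-1] for 0 < i < vl.
vbfloat16m1_t shift_in_scalar(vbfloat16m1_t src, __bf16 head, size_t vl) {
  return __riscv_vfslide1up_vf_bf16m1(src, head, vl);
}
]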
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000000000..2423b0bbdbb80
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
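[Before the widening-add tests, a short usage sketch of the vfsub intrinsics exercised above (illustrative only; masked_diff is not a name from this patch). The _m variants take the mask as the first argument; in the generated IR, the trailing i64 7 operand is the dynamic rounding-mode encoding and the i64 3 on the masked calls is the tail/mask-agnostic policy:

#include <riscv_vector.h>

// Illustrative: computes op1[i] - op2[i] for active lanes among the
// first vl elements; masked-off lanes follow the mask-agnostic policy.
vbfloat16mf2_t masked_diff(vbool32_t m, vbfloat16mf2_t op1,
                           vbfloat16mf2_t op2, size_t vl) {
  return __riscv_vfsub_vv_bf16mf2_m(m, op1, op2, vl);
}
]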
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000000000..24d34f46f4203
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000000000..fb3e0031af98c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000000000..be09003320386
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000000000..749081333c2b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000000000..6783ba43b0570
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
@@ -0,0 +1,455 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
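
As a usage sketch (not part of this patch), the bf16-to-f32 widening multiply exercised by the tests above could be strip-mined over arrays as below. This assumes a toolchain where these experimental Zvfbfa intrinsics are enabled and where the existing bf16 loads (__riscv_vle16_v_bf16m1, from the Zvfbfmin intrinsics) and vsetvl/vse32 intrinsics are available; the function name and loop structure are illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

/* c[i] = (float)a[i] * (float)b[i], computed with the widening multiply.
   Note EMUL doubles across the widening op: bf16 at m1 produces f32 at m2. */
void widening_mul_bf16(const __bf16 *a, const __bf16 *b, float *c, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);             /* elements this pass */
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vfloat32m2_t vc = __riscv_vfwmul_vv_bf16m1_f32m2(va, vb, vl);
    __riscv_vse32_v_f32m2(c + i, vc, vl);
    i += vl;
  }
}
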
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..6127a94c919d9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..f37dd310d944d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
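+
+// A note on the autogenerated CHECK lines below: the trailing `i64 7` operand
+// of the intrinsic calls selects the dynamic rounding mode (FRM_DYN), while
+// the `_rm` test variants pass an explicit mode (__RISCV_FRM_RNE lowers to
+// `i64 0`). The final `i64 3` operand is the tail-agnostic/mask-agnostic
+// policy, which these non-policy multiply-accumulate intrinsics default to.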
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000000000..510ff9193389e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
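+
+// The tests below cover the four widening-subtract forms: `vv` (bf16 vector -
+// bf16 vector), `vf` (bf16 vector - bf16 scalar), `wv` (f32 vector - bf16
+// vector), and `wf` (f32 vector - bf16 scalar), each producing an f32 result
+// at twice the source SEW.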
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
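
For context (not part of the patch): the tests above exercise the widening bf16 subtract intrinsics in every masked/rounding-mode combination, but none of them show the typical strip-mined usage pattern. Below is a minimal sketch of how application code might call one of these intrinsics. It assumes a toolchain where the experimental Zvfbfa feature is enabled and where the Zvfbfmin bf16 load/store intrinsics (__riscv_vle16_v_bf16m1 and friends) are available; the vfwsub intrinsic name is taken directly from the tests above.

// Sketch only: widen two __bf16 buffers and subtract into f32,
// strip-mined on vsetvl. Not part of this patch.
#include <riscv_vector.h>
#include <stddef.h>

void bf16_widening_sub(const __bf16 *a, const __bf16 *b, float *out,
                       size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);  // lanes handled this iteration
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    // Widening subtract: bf16 - bf16 -> f32; LMUL doubles from m1 to m2.
    vfloat32m2_t vd = __riscv_vfwsub_vv_bf16m1_f32m2(va, vb, vl);
    __riscv_vse32_v_f32m2(out + i, vd, vl);
    i += vl;
  }
}
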
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000000000..669d0427b569a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
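
For context (not part of the patch): the vmfeq tests above only check that each intrinsic lowers to the expected llvm.riscv.vmfeq call; a mask result is usually consumed by a follow-up mask operation. The sketch below counts equal lanes with the standard vcpop.m intrinsic (__riscv_vcpop_m_b16) — assumed available from the base vector intrinsics — together with the vmfeq intrinsic named in the tests above and the Zvfbfmin bf16 loads.

// Sketch only: count lanes where two bf16 buffers compare equal.
// Not part of this patch.
#include <riscv_vector.h>
#include <stddef.h>

size_t count_equal_bf16(const __bf16 *a, const __bf16 *b, size_t n) {
  size_t matches = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    // One mask bit per bf16 element at LMUL=1 (SEW/LMUL = 16 -> vbool16_t).
    vbool16_t eq = __riscv_vmfeq_vv_bf16m1_b16(va, vb, vl);
    matches += __riscv_vcpop_m_b16(eq, vl);  // population count of mask bits
    i += vl;
  }
  return matches;
}
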
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000000000..b169efd51a0b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
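[Reviewer note, not part of the patch] For readers unfamiliar with the bf16 compare intrinsics exercised above: each file pairs a vector-vector (vv) and vector-scalar (vf) form per LMUL, plus masked (_m) variants whose merge operand is poison in the non-policy form. Below is a minimal usage sketch of the vmfge vv form added here, under the assumption that the toolchain enables the experimental Zvfbfa extension (and the Zvfbfmin bf16 load intrinsics) and that only standard RVV intrinsics (__riscv_vsetvl_e16m1, __riscv_vle16_v_bf16m1, __riscv_vcpop_m_b16) are otherwise used:

#include <riscv_vector.h>
#include <stddef.h>

/* Count how many elements of a[] are >= the corresponding b[] element.
 * Strip-mined loop; vl is re-established each iteration via vsetvl.
 * Illustrative sketch only; assumes Zvfbfa (+ Zvfbfmin loads) is enabled. */
size_t count_ge(const __bf16 *a, const __bf16 *b, size_t n) {
  size_t total = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vbool16_t m = __riscv_vmfge_vv_bf16m1_b16(va, vb, vl);
    total += __riscv_vcpop_m_b16(m, vl); /* population count of the mask */
    i += vl;
  }
  return total;
}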
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000000000..9aea7d24b0edc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
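[Reviewer note, not part of the patch] As the checks above show, the non-policy _m variants pass poison as the merge operand, so inactive lanes of the result mask are undefined. When a fully-defined combined predicate is wanted (e.g. a range test lo < x < hi), the usual idiom is two unmasked compares joined with a mask-logical op rather than chaining masked compares. A hedged sketch, same assumptions as the previous note; __riscv_vmand_mm_b16 is the standard RVV mask-and intrinsic, and __riscv_vmflt_vf_bf16m1_b16 is added by this patch:

#include <riscv_vector.h>

/* Lanes where lo < x[i] < hi, as a fully-defined mask (no poison lanes). */
vbool16_t in_open_range(vbfloat16m1_t x, __bf16 lo, __bf16 hi, size_t vl) {
  vbool16_t gt_lo = __riscv_vmfgt_vf_bf16m1_b16(x, lo, vl); /* x > lo */
  vbool16_t lt_hi = __riscv_vmflt_vf_bf16m1_b16(x, hi, vl); /* x < hi */
  return __riscv_vmand_mm_b16(gt_lo, lt_hi, vl);            /* both hold */
}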
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000000000..40f0c27f5b37a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
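[Reviewer note, not part of the patch] One more illustrative use of the vector-scalar form tested above: finding the position of the first element at or below a scalar threshold via the standard __riscv_vfirst_m_b16 intrinsic (which returns -1 when no lane is set). Per the V-extension comparison semantics, these ordered compares produce false for NaN operands, so NaN lanes never match. A sketch under the same toolchain assumptions as the earlier notes:

#include <riscv_vector.h>

/* Index within this vl-element chunk of the first x[i] <= limit, or -1. */
long first_at_or_below(vbfloat16m1_t x, __bf16 limit, size_t vl) {
  vbool16_t m = __riscv_vmfle_vf_bf16m1_b16(x, limit, vl);
  return __riscv_vfirst_m_b16(m, vl);
}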
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000000000..f64eee3effdaf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
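+
+// Note: the masked (_m) forms above lower with a poison passthru operand
+// (no maskedoff vector), so inactive lanes of the result mask are agnostic
+// in this non-policy API.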
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000000000..809ea5628e394
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
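+// The tests below exercise __riscv_vmfne for every bf16 LMUL (mf4 through m8),
+// in both vector-vector (_vv_) and vector-scalar (_vf_) forms, unmasked and
+// masked. The result is a mask type whose _b* ratio equals SEW/LMUL; a minimal
+// usage sketch (same types and vl as the tests):
+//
+//   vbool64_t ne   = __riscv_vmfne_vv_bf16mf4_b64(a, b, vl);
+//   vbool64_t ne_m = __riscv_vmfne_vv_bf16mf4_b64_m(mask, a, b, vl);
+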
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
new file mode 100644
index 0000000000000..9d6b071c2768c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
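+// These are the overloaded forms: __riscv_vfadd resolves on its argument
+// types, so no _bf16* suffix appears at the call site. In the lowered IR the
+// trailing `i64 7` selects the dynamic rounding mode (frm), and the masked
+// calls additionally carry `i64 3`, the tail-agnostic/mask-agnostic policy.
+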
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
new file mode 100644
index 0000000000000..2760f85a45d3c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c
@@ -0,0 +1,134 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
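+// __riscv_vfclass classifies each bf16 element and returns an unsigned 16-bit
+// vector of matching LMUL, with one of the ten vfclass.v category bits set per
+// element. A minimal usage sketch:
+//
+//   vuint16m1_t cls   = __riscv_vfclass(v, vl);        // unmasked
+//   vuint16m1_t cls_m = __riscv_vfclass(vm, v, vl);    // masked
+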
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
new file mode 100644
index 0000000000000..ae3f1f24eb762
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
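+// __riscv_vfmacc computes vd = vs1 * vs2 + vd (rs1 * vs2 + vd in the _vf_
+// form); vd is both accumulator and destination, so it is passed through to
+// the intrinsic rather than a poison operand. A minimal usage sketch:
+//
+//   vbfloat16m1_t acc2 = __riscv_vfmacc(acc, x, y, vl);
+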
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+}
+
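The vfmacc tests above exercise the overloaded accumulate form, vd[i] = vs1[i] * vs2[i] + vd[i]; in the generated IR, the trailing i64 7 operand selects the dynamic rounding mode (frm) and the final i64 3 encodes the tail/mask policy. A minimal strip-mined usage sketch follows, assuming the bf16 vector load/store intrinsics from Zvfbfmin are also available; the function and array names are illustrative and not part of this patch:

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: acc[i] += a[i] * b[i] over n bf16 elements, strip-mined
// with vsetvl. Illustrative only; not part of the generated tests.
void bf16_fma(__bf16 *acc, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t vd  = __riscv_vle16_v_bf16m1(acc + i, vl);
    vbfloat16m1_t vs1 = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vs2 = __riscv_vle16_v_bf16m1(b + i, vl);
    vd = __riscv_vfmacc(vd, vs1, vs2, vl); // vd = vs1 * vs2 + vd
    __riscv_vse16_v_bf16m1(acc + i, vd, vl);
    i += vl;
  }
}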
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
new file mode 100644
index 0000000000000..db2184c6e1dba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+}
+
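vfmadd shares vfmacc's argument list but, per the V specification, uses vd as the multiplicand rather than the addend: vfmacc computes vs1 * vs2 + vd, while vfmadd computes vs1 * vd + vs2. A short contrast, as a sketch with an illustrative wrapper name:

#include <stddef.h>
#include <riscv_vector.h>

// Same argument order, different role for vd (sketch, not from the patch):
vbfloat16m1_t fma_forms(vbfloat16m1_t vd, vbfloat16m1_t vs1,
                        vbfloat16m1_t vs2, size_t vl) {
  vbfloat16m1_t acc = __riscv_vfmacc(vd, vs1, vs2, vl);  // vs1 * vs2 + vd
  vbfloat16m1_t mad = __riscv_vfmadd(acc, vs1, vs2, vl); // vs1 * acc + vs2
  return mad;
}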
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
new file mode 100644
index 0000000000000..66497bfb15934
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax(mask, op1, op2, vl);
+}
+
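Unlike the FMA tests, the vfmax IR carries no rounding-mode operand (min/max do not round); the non-policy unmasked forms pass a poison merge operand, and the masked forms append the i64 3 policy, so inactive lanes appear to be agnostic. A masked usage sketch, with illustrative names:

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: clamp active lanes from below with a scalar floor.
// In the non-policy masked form, inactive lanes are agnostic.
vbfloat16m1_t clamp_below(vbool16_t m, vbfloat16m1_t v, __bf16 lo,
                          size_t vl) {
  return __riscv_vfmax(m, v, lo, vl);
}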
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
new file mode 100644
index 0000000000000..1dc290bf1a222
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge(op1, op2, mask, vl);
+}
+
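vfmerge has no separate masked variant: the mask is an ordinary operand, and each result element takes the scalar when its mask bit is set and the vector element otherwise. A sketch with an illustrative wrapper name:

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: result[i] = mask[i] ? fill : v[i], per vfmerge.vfm semantics.
vbfloat16m1_t fill_where(vbfloat16m1_t v, __bf16 fill, vbool16_t mask,
                         size_t vl) {
  return __riscv_vfmerge(v, fill, mask, vl);
}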
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
new file mode 100644
index 0000000000000..1564d1195ecef
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin(mask, op1, op2, vl);
+}
+
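For readers skimming the autogenerated checks above, a minimal standalone use of the overloaded bf16 __riscv_vfmin follows. This is an illustrative sketch only, not part of the patch: the function name clamp_upper is invented, and it assumes a toolchain built with this patch and the experimental zvfbfa extension enabled.

#include <riscv_vector.h>

// Hypothetical helper (not from the patch): clamp every element of v to
// at most `limit`. The vector-scalar overload resolves to the
// llvm.riscv.vfmin.nxv4bf16.bf16 intrinsic shown in the checks above.
vbfloat16m1_t clamp_upper(vbfloat16m1_t v, __bf16 limit, size_t vl) {
  return __riscv_vfmin(v, limit, vl);
}

Note the two IR shapes in the checks: the unmasked, non-policy form passes poison as the merge operand, while the masked form carries a trailing policy operand (the i64 3, i.e. tail and mask agnostic).
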
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
new file mode 100644
index 0000000000000..0384e7da29e08
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac(mask, vd, rs1, vs2, vl);
+}
+
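As a usage illustration for the vfmsac checks above (a sketch, not part of the patch; fused_ms_step is an invented name): per the RVV spec, vfmsac overwrites the addend, computing vd = vs1 * vs2 - vd, and the extra i64 7 operand visible in the IR selects the dynamic rounding mode from frm.

#include <riscv_vector.h>

// Hypothetical masked multiply-subtract step: acc = x * y - acc in lanes
// where the mask is set; other lanes follow the tail/mask-agnostic
// policy (the trailing i64 3 in the checks above).
vbfloat16m2_t fused_ms_step(vbool8_t m, vbfloat16m2_t acc,
                            vbfloat16m2_t x, vbfloat16m2_t y, size_t vl) {
  return __riscv_vfmsac(m, acc, x, y, vl);
}
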
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
new file mode 100644
index 0000000000000..306f189b54c89
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub(mask, vd, rs1, vs2, vl);
+}
+
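One point worth spelling out for the vfmsub checks above: per the RVV spec, vfmsub multiplies the destination register, vd = vd * vs1 - vs2, whereas vfmsac in the previous file overwrites the addend. A hedged sketch under the same assumptions as the earlier examples (scale_then_subtract is an invented name, not part of the patch):

#include <riscv_vector.h>

// Hypothetical helper: scale each element of vd, then subtract bias,
// i.e. vd * scale - bias, using the vector-scalar (vf) overload.
vbfloat16m1_t scale_then_subtract(vbfloat16m1_t vd, __bf16 scale,
                                  vbfloat16m1_t bias, size_t vl) {
  return __riscv_vfmsub(vd, scale, bias, vl);
}
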
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
new file mode 100644
index 0000000000000..fffd83a12d36c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
new file mode 100644
index 0000000000000..f85378ff0f37a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+ return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+ return __riscv_vfmv_f(src);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
new file mode 100644
index 0000000000000..fb635d61670c2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                             size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                             size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                          size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                          size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                          size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                              size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                           size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                           size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+                                         size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+                                         size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+                                         size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm,
+                                                vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm,
+                                                vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                             size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                             size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                             size_t vl) {
+ return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm,
+                                                  vbfloat16mf4_t vs2,
+                                                  size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm,
+                                                  vbfloat16mf2_t vs2,
+                                                  size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm,
+                                                 vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2,
+                                               size_t vl) {
+ return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                              size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+                                              size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+                                            size_t vl) {
+ return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
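
For orientation while reading the checks above: the masked, explicit-rounding-mode overload reduces to a single call with the mask first and the frm argument before vl. A minimal sketch mirroring the bf16m1 test case (the helper name is hypothetical, not part of the patch):

// Narrow f32 lanes to bf16 under a mask, rounding to nearest-even.
vbfloat16m1_t narrow_f32_to_bf16_rne_masked(vbool16_t m, vfloat32m2_t v,
                                            size_t vl) {
  return __riscv_vfncvt_f_bf16(m, v, __RISCV_FRM_RNE, vl);
}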
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..1ad856df48c4b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
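
A minimal strip-mined usage sketch of the unmasked round-toward-odd overload tested above. This assumes the bf16 load/store intrinsics (__riscv_vle32_v_f32m2, __riscv_vse16_v_bf16m1) are available alongside this patch, and the function name is illustrative only:

#include <riscv_vector.h>
#include <stddef.h>

// Narrow a float buffer to bf16 with round-toward-odd, one vector strip at a time.
void narrow_buf_f32_to_bf16(const float *src, __bf16 *dst, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m2(n);                 // lanes this pass
    vfloat32m2_t v = __riscv_vle32_v_f32m2(src, vl);     // load f32 strip
    vbfloat16m1_t r = __riscv_vfncvt_rod_f_bf16(v, vl);  // SEW 32 -> 16 narrow
    __riscv_vse16_v_bf16m1(dst, r, vl);                  // store bf16 strip
    src += vl;
    dst += vl;
    n -= vl;
  }
}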
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..12d08934bd49d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> poison, <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu(vm, vs2, vl);
+}
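
As a quick note on the rtz variants above: the masked non-policy overload takes the mask as the first argument and lowers with the tail/mask-agnostic policy (the trailing i64 3 in the checks). A minimal sketch mirroring the bf16m1 test case; the helper name is hypothetical:

// Convert bf16 lanes to unsigned 8-bit with truncation, under a mask.
vuint8mf2_t bf16_to_u8_rtz_masked(vbool16_t m, vbfloat16m1_t v, size_t vl) {
  return __riscv_vfncvt_rtz_xu(m, v, vl);
}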
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..6f7928b763230
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc(mask, vd, rs1, vs2, vl);
+}
+
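For reference, vfnmacc computes vd = -(vs1 * vs2) - vd per lane. A minimal sketch of the unmasked vector-scalar overload tested above; the helper name is hypothetical:

// Negated multiply-accumulate on bf16: acc = -(s * v) - acc, element-wise.
vbfloat16m1_t neg_fma_scalar(vbfloat16m1_t acc, __bf16 s, vbfloat16m1_t v,
                             size_t vl) {
  return __riscv_vfnmacc(acc, s, v, vl);
}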
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000000000..97d207040e5b2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd(mask, vd, rs1, vs2, vl);
+}
+
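+// A hand-written usage sketch, not part of the autogenerated checks above:
+// vfnmadd overwrites the accumulator operand, computing vd = -(vd * vs1) - vs2
+// elementwise, and the overloaded __riscv_vfnmadd resolves on the operand
+// types exactly as in the tests above. The helper name is illustrative only.
+static inline vbfloat16m1_t
+bf16_negated_madd(vbfloat16m1_t acc, vbfloat16m1_t a, vbfloat16m1_t b,
+                  size_t vl) {
+  // Unmasked form: all elements up to vl participate.
+  return __riscv_vfnmadd(acc, a, b, vl);
+}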
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000000000..404b4f83c4f1f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+}
+
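+// A hand-written usage sketch, not part of the autogenerated checks above:
+// vfnmsac subtracts a product from the accumulator, vd = -(vs1 * vs2) + vd.
+// The masked overload takes the mask as the first argument, matching the
+// tests above; the helper name is illustrative only.
+static inline vbfloat16m1_t
+bf16_fused_product_subtract(vbool16_t mask, vbfloat16m1_t acc,
+                            vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
+  // Masked form: inactive elements follow the mask/tail policy operand
+  // (the trailing i64 3 in the checks above).
+  return __riscv_vfnmsac(mask, acc, a, b, vl);
+}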
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000000000..3a520dd9af9bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
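+// A hand-written usage sketch, not part of the autogenerated checks above:
+// vfnmsub multiplies into the accumulator operand, vd = -(vd * vs1) + vs2,
+// so it negates the product but adds the addend, in contrast to vfnmadd,
+// which negates both. The helper name is illustrative only.
+static inline vbfloat16m1_t
+bf16_negated_msub(vbfloat16m1_t acc, vbfloat16m1_t a, vbfloat16m1_t b,
+                  size_t vl) {
+  return __riscv_vfnmsub(acc, a, b, vl);
+}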
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
new file mode 100644
index 0000000000000..462b6acf8eee6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7(mask, op1, vl);
+}
+
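+// A hand-written usage sketch, not part of the autogenerated checks above:
+// vfrec7 returns a reciprocal estimate accurate to roughly 7 bits, which
+// callers typically refine with a Newton-Raphson step, r = r * (2 - x * r).
+// The refinement below is a sketch only; it assumes a bf16 splat intrinsic
+// named __riscv_vfmv_v_f_bf16m1 (the vfmv tests in this patch suggest one
+// exists, but the exact name here is an assumption).
+static inline vbfloat16m1_t
+bf16_reciprocal(vbfloat16m1_t x, size_t vl) {
+  vbfloat16m1_t r = __riscv_vfrec7(x, vl);
+  // 2 - x * r via vfnmsac (vd = -(vs1 * vs2) + vd), then one multiply.
+  vbfloat16m1_t two = __riscv_vfmv_v_f_bf16m1((__bf16)2.0f, vl);
+  return __riscv_vfmul(r, __riscv_vfnmsac(two, x, r, vl), vl);
+}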
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..051fde7ef5472
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
new file mode 100644
index 0000000000000..04941823d1917
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000000000..615deddf4254f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000000000..a895e5f6ad6bd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..0187516d00dc1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..4a7689492de92
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..f9f2dc0cf26b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
new file mode 100644
index 0000000000000..ebcf6fa4669a3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
new file mode 100644
index 0000000000000..124e7fb6de342
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, vl);
+}
+
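+// The _rm variants pass an explicit rounding mode between the sources and
+// vl. A minimal sketch of the call shape, using the __RISCV_FRM_* enum from
+// <riscv_vector.h>:
+//   __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+// __RISCV_FRM_RNE (round-to-nearest-even) encodes as 0, which is the i64 0
+// rounding-mode operand in the IR below, replacing the i64 7 (dynamic) used
+// by the forms without an explicit rounding mode.
+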
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
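+// The _rm_m variants combine both forms: the mask comes first and the
+// explicit rounding mode precedes vl, e.g. (a sketch of the call shape):
+//   __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+// The IR then carries the mask, the i64 0 rounding mode, and the i64 3
+// tail/mask-agnostic policy operand together.
+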
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000000000..0399a639898a0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
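+// A note on how these overloads resolve (a hedged reading of the calls
+// below, not part of the autogenerated checks): the int8/uint8-to-bf16
+// widenings go through __riscv_vfwcvt_f_bf16, where the _bf16 suffix selects
+// the bfloat destination, while the bf16-to-f32 widenings use the plain
+// __riscv_vfwcvt_f overload, disambiguated by the vbfloat16 source type.
+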
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f(vm, vs2, vl);
+}
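+
+// Naming note: the _f_f_v tests above widen from bfloat16 to float32, so the
+// result LMUL is twice the source LMUL (bf16mf4 -> f32mf2, ..., bf16m4 ->
+// f32m8); bf16m8 has no widened counterpart and is therefore not covered.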
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000000000..2eb7fc8cdeea8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
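+
+// For reference, a standalone build of code using these intrinsics would need
+// the experimental extension enabled at the driver level, roughly (the exact
+// -march string and extension version are assumptions, not taken from this
+// patch):
+//   clang --target=riscv64 -menable-experimental-extensions \
+//     -march=rv64gcv_zvfbfa0p1 -c example.c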
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
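+
+// In the overloaded masked form the mask vm is the leading C argument,
+// matching the v0 mask-register convention; in the generated IR it is passed
+// to the .mask intrinsic after the data operands, as checked above.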
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
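+
+// Note on the rounding-mode operand in the checks: the explicit
+// __RISCV_FRM_RNE argument lowers to the immediate 0 in the intrinsic call,
+// whereas the variants without an frm argument pass 7, i.e. the dynamic
+// rounding mode taken from the frm CSR.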
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000000000..28f507619b428
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c
@@ -0,0 +1,474 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
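+
+// As with vfwmacc above, the single overloaded name dispatches on operand
+// types: a vbfloat16* vs1 selects the vector-vector (vv) form and a scalar
+// __bf16 vs1 selects the vector-scalar (vf) form, as the two tests above
+// show.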
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
new file mode 100644
index 0000000000000..8de49fa586dcf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c
@@ -0,0 +1,451 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..783693106a217
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
+}
+
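+// The _rm tests pass an explicit rounding mode: __RISCV_FRM_RNE lowers to a
+// frm operand of 0 in the IR, whereas the tests above pass 7 (dynamic) to
+// defer rounding to the frm CSR.
+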
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..ca936af7140e5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c
@@ -0,0 +1,480 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
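+// vfwnmsac computes vd[i] = -(vs1[i] * vs2[i]) + vd[i], widening the bf16
+// sources to f32 before the fused multiply (standard RVV widening FMA
+// semantics). Each LMUL pairing is exercised in vector-vector (vv) and
+// vector-scalar (vf) form, unmasked and masked, with and without an
+// explicit rounding mode.
+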
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, vl);
+}
+
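+// The _m variants take a vbool mask as the first argument and select the
+// .mask flavor of the intrinsic, with the mask passed after the data
+// operands.
+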
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
new file mode 100644
index 0000000000000..2e22e22e4b6fc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c
@@ -0,0 +1,893 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
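+// vfwsub is covered in all four widening-subtract forms: vv (bf16 - bf16),
+// vf (bf16 - scalar bf16), wv (f32 - widened bf16), and wf (f32 - widened
+// scalar bf16), each producing an f32 result at twice the source SEW.
+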
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
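
A minimal usage sketch of the overloaded bf16 widening-subtract intrinsics exercised by the tests above, assuming Zvfbfa is enabled as in the RUN lines; widen_diff is a hypothetical helper written for illustration, not part of the test suite or the patch itself:

#include <riscv_vector.h>

// Hypothetical example, mirroring the call shapes tested above:
// vfwsub.vv widens two bf16 vectors into an f32 result, then
// vfwsub.wf subtracts a bf16 scalar from the already-widened
// f32 vector with an explicit round-to-nearest-even mode.
vfloat32m2_t widen_diff(vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  // Elementwise widening subtract: (float)a[i] - (float)b[i].
  vfloat32m2_t d = __riscv_vfwsub_vv(a, b, vl);
  // Scalar variant on the wide operand, with RNE rounding.
  return __riscv_vfwsub_wf(d, (__bf16)1.0f, __RISCV_FRM_RNE, vl);
}
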
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
new file mode 100644
index 0000000000000..29881c9ba5d5e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
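As a usage note for the overloaded vmfeq forms tested above, here is a minimal strip-mined sketch. It assumes a build with this patch plus the Zvfbfmin bf16 unit-stride loads enabled (e.g. -march=rv64gcv_zvfbfa); count_equal_bf16 is a hypothetical helper, not an intrinsic from this change.

#include <riscv_vector.h>
#include <stddef.h>

// Count how many elements of a bf16 buffer compare equal to a scalar key.
static size_t count_equal_bf16(const __bf16 *src, size_t n, __bf16 key) {
  size_t count = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);               // strip-mine the loop
    vbfloat16m1_t v = __riscv_vle16_v_bf16m1(src + i, vl); // bf16 load (Zvfbfmin)
    vbool16_t eq = __riscv_vmfeq(v, key, vl);              // overloaded vf compare
    count += __riscv_vcpop(eq, vl);                        // count set mask bits
    i += vl;
  }
  return count;
}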
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
new file mode 100644
index 0000000000000..b8083c5ebb8df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
new file mode 100644
index 0000000000000..b8749b3295a19
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
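One point worth noting about the masked, non-policy vmfgt forms above: the maskedoff operand is poison in the lowered IR, so inactive lanes of the result are unspecified and should be re-masked before use. A minimal sketch under the same build assumptions as above (both_above is a hypothetical helper):

#include <riscv_vector.h>
#include <stddef.h>

// Lanes where x > t1 and, among those, y > t2.
static vbool16_t both_above(vbfloat16m1_t x, vbfloat16m1_t y,
                            __bf16 t1, __bf16 t2, size_t vl) {
  vbool16_t above_t1 = __riscv_vmfgt(x, t1, vl);           // unmasked compare
  vbool16_t above_t2 = __riscv_vmfgt(above_t1, y, t2, vl); // masked compare
  // AND with the governing mask to discard the unspecified inactive lanes.
  return __riscv_vmand(above_t2, above_t1, vl);
}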
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
new file mode 100644
index 0000000000000..724608c89e8de
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
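The overloaded comparisons above resolve purely from the argument types, so bf16 code can stay LMUL-generic. For readers following along, a minimal usage sketch (a hypothetical helper, assuming a compiler built with this patch and the Zvfbfa extension enabled; __riscv_vcpop comes from the base V intrinsics, not this change):

#include <riscv_vector.h>
#include <stddef.h>

// Count how many of the first vl lanes of `v` are <= the scalar threshold `t`.
// __riscv_vmfle picks the vf overload from the (vector, scalar) argument pair
// and lowers to llvm.riscv.vmfle.nxv4bf16.bf16 as in the CHECK lines above;
// __riscv_vcpop counts the set bits of the resulting mask.
static size_t count_le(vbfloat16m1_t v, __bf16 t, size_t vl) {
  vbool16_t m = __riscv_vmfle(v, t, vl);
  return __riscv_vcpop(m, vl);
}
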
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
new file mode 100644
index 0000000000000..1b0b898d5053f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(mask, op1, op2, vl);
+}
+
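As with vmfle, the vmflt overloads compose with the existing mask intrinsics from the base V extension; the masked forms tested above take the mask as the leading argument and lower to the .mask. variants with a poison passthru. A small sketch of an open-interval test built from the unmasked overloads (hypothetical helper name; vmfgt is added by this same patch, vmand is base V):

#include <riscv_vector.h>

// Mask of lanes with lo < x < hi, over the first vl elements.
// Two unmasked compares are combined with vmand.mm; the masked vmflt
// overload (mask-first, as tested above) would instead restrict the
// comparison itself to the active lanes.
static vbool16_t in_open_range(vbfloat16m1_t x, __bf16 lo, __bf16 hi,
                               size_t vl) {
  vbool16_t gt_lo = __riscv_vmfgt(x, lo, vl);
  vbool16_t lt_hi = __riscv_vmflt(x, hi, vl);
  return __riscv_vmand(gt_lo, lt_hi, vl);
}
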
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
new file mode 100644
index 0000000000000..672c1504d73eb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne(mask, op1, op2, vl);
+}
+
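The files that follow cover the policy variants (_tu/_tum/_tumu suffixes): the extra `i64 7` operand in their CHECK lines is the dynamic rounding-mode (frm) encoding, and the trailing policy operand encodes the tail/mask agnosticism. A short sketch of the tail-undisturbed form, using the signature exactly as tested below (hypothetical helper name):

#include <riscv_vector.h>

// Add x into acc for the first vl lanes only; with the _tu
// (tail-undisturbed) variant, lanes at index >= vl keep the value of
// the first operand (the passthru) instead of being left unspecified.
static vbfloat16m1_t add_prefix_tu(vbfloat16m1_t acc, vbfloat16m1_t x,
                                   size_t vl) {
  return __riscv_vfadd_vv_bf16m1_tu(acc, acc, x, vl);
}
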
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000000000..6d55279ae306f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
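For reference, the policy suffixes exercised in the vfadd checks above map directly onto the trailing i64 policy operand in the generated IR: bit 0 set means tail agnostic and bit 1 set means mask agnostic, which is why _tum lowers with 2, _tumu with 0, and _mu with 1 (the constant i64 7 is the dynamic rounding mode). A minimal caller sketch, illustrative only and not part of the patch, assuming a toolchain carrying this patch with the experimental Zvfbfa feature enabled (the helper name add_policies is invented for the example):

#include <riscv_vector.h>
#include <stddef.h>

// Illustrative only; not part of the patch.
vbfloat16m1_t add_policies(vbool16_t mask, vbfloat16m1_t maskedoff,
                           vbfloat16m1_t op1, vbfloat16m1_t op2,
                           __bf16 scalar, size_t vl) {
  // _tum: tail undisturbed, mask agnostic -- inactive lanes may be
  // clobbered, tail lanes keep maskedoff's values (policy operand 2).
  vbfloat16m1_t a = __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
  // _tumu: tail undisturbed, mask undisturbed -- both inactive and tail
  // lanes keep maskedoff's values (policy operand 0).
  vbfloat16m1_t b = __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, a, op2, vl);
  // _mu: tail agnostic, mask undisturbed -- inactive lanes keep
  // maskedoff's values, tail lanes are agnostic (policy operand 1).
  return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, b, scalar, vl);
}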
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000000000..8e6946de398b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl);
+}
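A similar sketch for the vfclass checks above (again illustrative only; the helper name flag_nans is invented). Each result element uses the standard RISC-V fclass bit encoding, e.g. bit 8 flags a signaling NaN and bit 9 a quiet NaN, so the classification can be post-processed with ordinary integer vector intrinsics from the base V extension:

#include <riscv_vector.h>
#include <stddef.h>

// Illustrative only; not part of the patch. Assumes the experimental
// Zvfbfa feature is enabled.
vuint16m1_t flag_nans(vuint16m1_t vd, vbfloat16m1_t vs2, size_t vl) {
  // Classify each bf16 lane; tail lanes keep vd's old values (tu policy).
  vuint16m1_t cls = __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl);
  // Keep only the NaN bits of the standard fclass encoding:
  // bit 8 (signaling NaN) and bit 9 (quiet NaN).
  return __riscv_vand_vx_u16m1(cls, (1 << 8) | (1 << 9), vl);
}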
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000000000..2d4e481b1f8bb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
new file mode 100644
index 0000000000000..511e073e10143
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
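+// Operand legend for the policy variants below: _tu keeps tail elements
+// undisturbed, _tum adds a mask, _tumu keeps both tail and inactive
+// elements undisturbed, and _mu keeps only inactive elements undisturbed.
+// In the emitted calls the trailing i64 encodes that policy (0 = tumu,
+// 1 = mu, 2 = tu/tum) and the i64 7 before the VL operand selects the
+// dynamic rounding mode (FRM_DYN).
+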
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000000000..f3698d41aff34
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
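[Editor's note: the policy suffix maps directly to the trailing i64 operand of the masked intrinsic in the checks above. _tum passes 2 (tail undisturbed, mask agnostic), _tumu passes 0 (both undisturbed), and _mu passes 1 (tail agnostic, mask undisturbed); the _tu forms instead call the unmasked intrinsic with the passthru as the first operand and no policy operand. A small sketch contrasting the masked variants, same toolchain assumptions as the RUN lines (wrapper names are hypothetical):

#include <riscv_vector.h>

/* Each call lowers to llvm.riscv.vfmax.mask.* with a different trailing
   policy operand, matching the FileCheck lines above. */
vbfloat16m1_t max_tum(vbool16_t m, vbfloat16m1_t dst,
                      vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  return __riscv_vfmax_vv_bf16m1_tum(m, dst, a, b, vl);   /* policy = 2 */
}
vbfloat16m1_t max_tumu(vbool16_t m, vbfloat16m1_t dst,
                       vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  return __riscv_vfmax_vv_bf16m1_tumu(m, dst, a, b, vl);  /* policy = 0 */
}
vbfloat16m1_t max_mu(vbool16_t m, vbfloat16m1_t dst,
                     vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  return __riscv_vfmax_vv_bf16m1_mu(m, dst, a, b, vl);    /* policy = 1 */
}
]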
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000000000..bcaf2cb8817e4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
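[Editor's note: unlike the arithmetic ops above, vfmerge consumes the mask as a per-lane selector (the last vector operand before vl) rather than as a control mask, which is why this file only exercises the _tu policy flavor and the lowering carries no trailing policy operand. A minimal sketch, same toolchain assumptions as the RUN lines; the wrapper name is hypothetical:

#include <riscv_vector.h>

/* Per-lane select between a vector and a scalar splat: where the mask
   bit is 1 the lane takes the scalar s, where it is 0 the lane comes
   from op1; tail lanes keep their value from maskedoff. */
vbfloat16m1_t select_scalar(vbfloat16m1_t maskedoff, vbfloat16m1_t op1,
                            __bf16 s, vbool16_t m, size_t vl) {
  return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, s, m, vl);
}
]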
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000000000..911f8792e4436
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000000000..9575ad337de8c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000000000..8e382f710ab5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000000000..716f056f3e12c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
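The vfmul tests above exercise all four RVV policy variants, and the mapping to the trailing operands of the masked IR intrinsic is visible in the CHECK lines: the `i64 7` operand is the dynamic (DYN) floating-point rounding mode, and the final policy operand encodes the agnosticism bits -- 2 for _tum (tail undisturbed, mask agnostic), 0 for _tumu (both undisturbed), 1 for _mu (tail agnostic, mask undisturbed). A minimal usage sketch, assuming a compiler with the experimental Zvfbfa feature enabled as in the RUN lines above; the helper name is hypothetical, the intrinsic names and signatures are exactly those exercised by the tests:

#include <riscv_vector.h>

// Hypothetical helper composing the four policy variants of vfmul.vf.
vbfloat16m1_t vfmul_policy_demo(vbool16_t mask, vbfloat16m1_t dest,
                                vbfloat16m1_t x, __bf16 s, size_t vl) {
  // _tu: unmasked; elements past vl (the tail) keep their values from dest.
  vbfloat16m1_t r = __riscv_vfmul_vf_bf16m1_tu(dest, x, s, vl);
  // _tum: masked; tail undisturbed, masked-off elements agnostic (policy 2).
  r = __riscv_vfmul_vf_bf16m1_tum(mask, r, x, s, vl);
  // _tumu: masked; tail and masked-off elements both undisturbed (policy 0).
  r = __riscv_vfmul_vf_bf16m1_tumu(mask, r, x, s, vl);
  // _mu: masked; tail agnostic, masked-off elements undisturbed (policy 1).
  return __riscv_vfmul_vf_bf16m1_mu(mask, r, x, s, vl);
}
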
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
new file mode 100644
index 0000000000000..069ee6a2a5df3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl);
+}
+
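The vfmv.c tests above cover both scalar-move forms: vfmv.v.f broadcasts the __bf16 scalar into every element up to vl, while vfmv.s.f writes it into element 0 only, with the _tu policy keeping the remaining elements from the merge operand. A short sketch under the same Zvfbfa assumptions (helper name hypothetical):

#include <riscv_vector.h>

vbfloat16m1_t vfmv_demo(vbfloat16m1_t dest, __bf16 s, size_t vl) {
  // vfmv.v.f: splat s into elements [0, vl); the tail keeps dest's values.
  vbfloat16m1_t v = __riscv_vfmv_v_f_bf16m1_tu(dest, s, vl);
  // vfmv.s.f: write s into element 0 only (when vl is nonzero); elements
  // 1.. keep v's values under the tail-undisturbed (_tu) policy.
  return __riscv_vfmv_s_f_bf16m1_tu(v, s, vl);
}
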
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
new file mode 100644
index 0000000000000..36d4fc332499f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c
@@ -0,0 +1,1577 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..84066846e188c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,237 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
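+// Note: vfncvt_rod encodes round-toward-odd in the opcode itself, so these
+// intrinsics take no frm argument; the masked variants instead end with a
+// tail/mask policy operand (tum = 2, tumu = 0, mu = 1).
+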
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..4644eff12e210
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,576 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
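+// Note: vfncvt_rtz truncates toward zero as part of the opcode, so these
+// intrinsics likewise take no frm argument; the masked variants end with a
+// tail/mask policy operand (tum = 2, tumu = 0, mu = 1).
+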
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
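
As a usage sketch (illustrative only, not part of the patch): the _tum, _tumu
and _mu variants above differ only in the trailing policy operand of the
masked intrinsic — 2 (tail undisturbed, mask agnostic), 0 (both undisturbed)
and 1 (tail agnostic, mask undisturbed) respectively, as the CHECK lines
show. A minimal caller of one of the intrinsics from this file; the wrapper
name is made up for illustration:

  #include <riscv_vector.h>

  // Narrow bf16 to i8 with round-toward-zero truncation; elements where
  // vm is 0 keep their old value from vd (_mu => policy operand 1 above).
  vint8mf2_t narrow_rtz_keep_inactive(vbool16_t vm, vint8mf2_t vd,
                                      vbfloat16m1_t vs2, size_t vl) {
    return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl);
  }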
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..93fd6ba2faff7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000000000..d7e6b8225d8c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000000000..e0c289d23c17f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
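For context on the trailing immediates in the CHECK lines above: i64 7 is the
rounding-mode operand (7 selects dynamic rounding, i.e. read frm), and the final
operand encodes the tail/mask policy: 0 for _tumu (tail and mask undisturbed),
1 for _mu (tail agnostic, mask undisturbed), and 2 for _tu/_tum (tail
undisturbed, mask agnostic).

Below is a minimal usage sketch of the new bf16 intrinsics; it is not taken from
this patch. It assumes a compiler built with this change, with
-menable-experimental-extensions and -march=rv64gcv_zvfbfa (and zvfbfmin
available for the bf16 loads/stores); the function and buffer names are
illustrative only.

#include <stddef.h>
#include <riscv_vector.h>

/* acc[i] -= a[i] * b[i] over bf16 data, strip-mined with vsetvl.
   The _tu variant keeps tail elements of the accumulator undisturbed. */
void nmsac_bf16(__bf16 *acc, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t vd  = __riscv_vle16_v_bf16m1(acc + i, vl);
    vbfloat16m1_t vs1 = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vs2 = __riscv_vle16_v_bf16m1(b + i, vl);
    /* vfnmsac: vd = -(vs1 * vs2) + vd */
    vd = __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
    __riscv_vse16_v_bf16m1(acc + i, vd, vl);
    i += vl;
  }
}
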
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000000000..05ccda36e9032
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
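The `_mu` tests above pin down the mask-undisturbed policy: each masked
vfnmsub call lowers with a trailing policy operand of 1 (tail agnostic,
mask undisturbed), so inactive lanes keep the incoming value of vd. For
the policy operands visible throughout these checks, bit 0 is
tail-agnostic and bit 1 is mask-agnostic: `_tumu` lowers with 0, `_mu`
with 1, and `_tum` with 2. A minimal usage sketch built on the intrinsic
tested above (only the wrapper name is invented; it assumes a toolchain
carrying this patch with the experimental zvfbfa extension enabled):

#include <riscv_vector.h>

// vd[i] = -(vs1[i] * vd[i]) + vs2[i] where mask[i] is set;
// lanes with mask[i] clear keep vd[i] (_mu = mask undisturbed).
static vbfloat16m1_t nmsub_where(vbool16_t mask, vbfloat16m1_t vd,
                                 vbfloat16m1_t vs1, vbfloat16m1_t vs2,
                                 size_t vl) {
  return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl);
}
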
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000000000..3123692b45a68
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
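Unlike the masked forms, the `_tu` tests above lower to the unmasked
vfrec7 intrinsic with the passthru (maskedoff) as the first operand and
a rounding-mode operand of 7 (frm = dynamic), with no policy operand. A
strip-mined usage sketch follows; the vfrec7 intrinsic is the one added
by this patch, while the vsetvl and bf16 load/store helpers are assumed
to come from the existing zvfbfmin intrinsic support:

#include <riscv_vector.h>
#include <stddef.h>

// Elementwise reciprocal estimate (about 7 bits of precision) over a
// buffer, processed vl elements at a time.
static void recip_estimate(const __bf16 *src, __bf16 *dst, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e16m1(n);
    vbfloat16m1_t v = __riscv_vle16_v_bf16m1(src, vl);  // assumed helper
    vbfloat16m1_t r = __riscv_vfrec7_v_bf16m1(v, vl);
    __riscv_vse16_v_bf16m1(dst, r, vl);                 // assumed helper
    src += vl;
    dst += vl;
    n -= vl;
  }
}
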
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..8436f0ea488b2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl);
+}
+
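Note that vfrsqrt7, unlike vfrec7, takes no rounding-mode operand: the
masked checks above pass only the mask, vl, and policy (the estimate is
not rounding-mode dependent). A minimal sketch of the `_tum` form
exercised above (tail undisturbed, mask agnostic, policy operand 2);
only the wrapper name is invented:

#include <riscv_vector.h>

// Reciprocal-square-root estimate on active lanes; tail lanes keep
// maskedoff, inactive lanes are agnostic (_tum policy).
static vbfloat16m1_t rsqrt_masked(vbool16_t mask, vbfloat16m1_t maskedoff,
                                  vbfloat16m1_t op1, size_t vl) {
  return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl);
}
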
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000000000..7dd2bb6cc502d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
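For orientation, here is a minimal usage sketch of the masked vfrsub reverse-subtract form the tests
above exercise. It is not part of the patch: only the __riscv_vfrsub_vf_bf16m1_mu call is taken from
the tests, while __riscv_vsetvl_e16m1, __riscv_vle16_v_bf16m1, and __riscv_vse16_v_bf16m1 are the
standard RVV strip-mining and bf16 load/store intrinsics, assumed here to be available alongside
Zvfbfa.

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: dst[i] = scalar - src[i] for lanes where mask is set; inactive
// lanes keep their previous dst value via the _mu (mask-undisturbed) policy.
void rsub_masked_bf16(__bf16 *dst, const __bf16 *src, __bf16 scalar,
                      vbool16_t mask, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e16m1(n);
    vbfloat16m1_t old = __riscv_vle16_v_bf16m1(dst, vl); // maskedoff operand
    vbfloat16m1_t op1 = __riscv_vle16_v_bf16m1(src, vl);
    vbfloat16m1_t res =
        __riscv_vfrsub_vf_bf16m1_mu(mask, old, op1, scalar, vl);
    __riscv_vse16_v_bf16m1(dst, res, vl);
  }
}

Reloading dst as the maskedoff operand each iteration is what gives the mask-undisturbed behavior:
lanes where the mask is clear pass the old value through to the stored result.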
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000000000..b39a0be2238a9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
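Similarly, a hedged sketch (again not part of the patch) of the tail-undisturbed vfsgnj form this
file covers: vfsgnj takes op1's magnitude with op2's sign, i.e. a bf16 copysign.
__riscv_vfsgnj_vv_bf16m1_tu comes straight from the tests; the vsetvl and bf16 load/store intrinsics
are the standard RVV ones and are assumptions here.

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: dst[i] = copysign(a[i], b[i]) in bf16. With the _tu policy the
// result's lanes past vl are taken from the dest operand (tail), though the
// store below writes only the first vl elements anyway.
void copysign_bf16(__bf16 *dst, const __bf16 *a, const __bf16 *b, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
    vl = __riscv_vsetvl_e16m1(n);
    vbfloat16m1_t tail = __riscv_vle16_v_bf16m1(dst, vl);
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b, vl);
    vbfloat16m1_t res = __riscv_vfsgnj_vv_bf16m1_tu(tail, va, vb, vl);
    __riscv_vse16_v_bf16m1(dst, res, vl);
  }
}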
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000000000..7542e7866c888
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
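A quick orientation note for the policy variants exercised above: the `_tu`,
`_tum`, `_tumu`, and `_mu` suffixes select the tail/mask policy, and the
trailing `i64` immediate on the masked `@llvm.riscv.vfsgnjn.mask.*` calls in
the checks encodes it (2 for `_tum`, 0 for `_tumu`, 1 for `_mu`), while `_tu`
lowers to the unmasked intrinsic with `maskedoff` carried as the passthru
operand. The `_vv` forms take two bfloat vectors; the `_vf` forms splat a
scalar `__bf16`. As a minimal caller sketch (illustrative names only, not part
of the patch; assumes a toolchain with experimental Zvfbfa support enabled):

    #include <riscv_vector.h>

    // Copies the negated sign of `s` onto each active lane of `v`.
    // Tail lanes keep `passthru` (tail undisturbed); masked-off lanes
    // are agnostic under the _tum policy.
    vbfloat16m1_t negate_sign_masked(vbool16_t m, vbfloat16m1_t passthru,
                                     vbfloat16m1_t v, __bf16 s, size_t vl) {
      return __riscv_vfsgnjn_vf_bf16m1_tum(m, passthru, v, s, vl);
    }
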
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..104149e1b2fa4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
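For reference, the trailing i64 immediate on the masked intrinsic calls in the
autogenerated checks above encodes the tail/mask policy: the _tumu tests emit 0,
the _mu tests emit 1, and the _tum tests emit 2. A minimal sketch of that
encoding, assuming LLVM's usual policy bit layout (the enum and helper names
here are illustrative, not part of the patch):

    /* Bit 0 = tail agnostic, bit 1 = mask agnostic, so
       tumu -> 0, mu -> 1, tum -> 2, matching the CHECK lines above. */
    enum policy_bits { POLICY_TAIL_AGNOSTIC = 1, POLICY_MASK_AGNOSTIC = 2 };

    static int policy_imm(int tail_agnostic, int mask_agnostic) {
      return (tail_agnostic ? POLICY_TAIL_AGNOSTIC : 0) |
             (mask_agnostic ? POLICY_MASK_AGNOSTIC : 0);
    }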
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..228dc1cd064a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
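The vfslide1down tests above and the vfslide1up tests below exercise the
element-shifting forms: slide1down moves each element one position toward index
0 and writes the scalar into the last active element, while slide1up is the
mirror image (scalar at index 0, elements shifted up). A scalar reference model
as a sketch, assuming a target where __bf16 is available (the function name is
illustrative, not from the patch):

    /* Reference semantics of vfslide1down.vf for the unmasked case:
       dst[i] = src[i+1] for i < vl-1, and the scalar lands at vl-1. */
    static void slide1down_ref(__bf16 *dst, const __bf16 *src,
                               __bf16 value, size_t vl) {
      for (size_t i = 0; i + 1 < vl; ++i)
        dst[i] = src[i + 1];
      if (vl)
        dst[vl - 1] = value;
    }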
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..9e6ff2b16f77d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
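Unlike the sign-injection and slide tests above, the vfsub checks below carry an
extra i64 operand ahead of VL: the rounding-mode immediate, emitted as 7
(dynamic, i.e. read the frm CSR) for the plain intrinsics. The values follow the
frm field defined by the F extension; a sketch (the enum name is illustrative):

    enum frm { FRM_RNE = 0, FRM_RTZ = 1, FRM_RDN = 2,
               FRM_RUP = 3, FRM_RMM = 4, FRM_DYN = 7 };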
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000000000..b6fd94ece20cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000000000..4bee376cfe0fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
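+// The widening add is exercised in its four forms, as the tests below show:
+// _vv (bf16 vector + bf16 vector), _vf (bf16 vector + bf16 scalar),
+// _wv (f32 vector + bf16 vector), and _wf (f32 vector + bf16 scalar).
+// Each widens its bf16 operands and yields an f32 result at twice the
+// source LMUL (e.g. bf16mf4 sources produce an f32mf2 result).
+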
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
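+// Mask-type mapping: the vbool*_t parameter tracks the SEW/LMUL ratio of the
+// bf16 source, e.g. bf16mf4 gives 16/(1/4) = 64, hence vbool64_t, while
+// bf16m4 here gives 16/4 = 4, hence vbool4_t; the f32 result at twice the
+// LMUL shares the same ratio (32/8 = 4).
+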
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
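+// The _tumu variants below differ from _tum only in the final policy
+// operand: i64 0 encodes tail-undisturbed/mask-undisturbed, so both tail
+// elements and masked-off elements keep the corresponding values of the vd
+// passthru.
+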
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
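+// The _mu variants use policy i64 1 (tail-agnostic/mask-undisturbed):
+// masked-off elements still keep vd, while tail elements may take any value.
+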
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
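+// The _rm variants take an explicit rounding-mode argument rather than
+// reading frm; __RISCV_FRM_RNE (round to nearest, ties to even) is the
+// constant 0, which is why the unmasked _rm_tu check lines below end in
+// "i64 0, i64 [[VL]]" and no extra IR parameter appears in the prototypes.
+// A minimal (hypothetical) caller, mirroring the first test below:
+//   vfloat32mf2_t widen_add_rne(vfloat32mf2_t acc, vbfloat16mf4_t a,
+//                               vbfloat16mf4_t b, size_t vl) {
+//     return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(acc, a, b,
+//                                                   __RISCV_FRM_RNE, vl);
+//   }
+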
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
new file mode 100644
index 0000000000000..9151319fcfb17
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl);
+}
+
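+// vfwcvt_f_xu_v: same shape as the signed conversions above, but widening
+// from unsigned i8 elements, so the lowering selects the "f.xu.v" intrinsic.
+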
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl);
+}
+
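+// vfwcvt_f_f_v: widens bf16 sources to f32 results, so vd and the return
+// type switch to vfloat32 while vs2 stays bfloat16.
+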
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl);
+}
+
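+// _tum variants: the mask comes first and the lowering switches to the
+// ".mask" intrinsics with trailing policy 2 (tail undisturbed, mask
+// agnostic).
+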
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl);
+}
+
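+// _tumu variants: identical lowering except the trailing policy operand is 0
+// (tail undisturbed, mask undisturbed).
+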
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl);
+}
+
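+// _mu variants: trailing policy operand 1 (tail agnostic, mask undisturbed).
+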
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
new file mode 100644
index 0000000000000..f67b1001a5fcb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
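+// The widening FMA intrinsics take a rounding-mode operand in addition to
+// the policy: the lowered calls end in (..., i64 7, vl, policy), where 7 is
+// FRM_DYN, i.e. defer to the frm CSR. The _vf forms take a scalar __bf16
+// vs1 in place of the first vector operand.
+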
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
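+// Masked variants follow: as with the conversions, _tum selects the ".mask"
+// intrinsics with trailing policy 2.
+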
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
new file mode 100644
index 0000000000000..6d78c74e14694
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,1017 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
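
For readers skimming these autogenerated checks, a minimal usage sketch may help before the next file. It is illustrative only, not part of the patch: the driver name `fwmsac_bf16_tumu`, the all-ones mask, and the assumed `-march=rv64gcv_zvfbfa` toolchain flag are all hypothetical. The sketch strip-mines a loop and calls the masked, explicit-rounding-mode `vfwmsac` variant exercised above, which lane-wise computes vd = (vs1 * vs2) - vd, widening the bf16 operands to f32.

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical driver (not from this patch): widening multiply-subtract of
// two bf16 vectors into an f32 accumulator, tail-undisturbed /
// mask-undisturbed, with an explicit round-to-nearest-even rounding mode.
void fwmsac_bf16_tumu(float *acc, const __bf16 *a, const __bf16 *b,
                      size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vfloat32m2_t vacc = __riscv_vle32_v_f32m2(acc + i, vl);
    // All-ones mask for illustration; a real kernel would compute one.
    vbool16_t vm = __riscv_vmset_m_b16(vl);
    // Same intrinsic shape as the test above: mask, dest, vs1, vs2, frm, vl.
    vacc = __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vacc, va, vb,
                                                   __RISCV_FRM_RNE, vl);
    __riscv_vse32_v_f32m2(acc + i, vacc, vl);
    i += vl;
  }
}

The non-`_rm` spellings tested elsewhere in the patch omit the `__RISCV_FRM_*` argument and use the dynamic rounding mode (the `i64 7` operand in the checks); the `_tu`/`_tum`/`_tumu`/`_mu` suffixes select the tail/mask policy encoded in the trailing policy operand.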
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000000000..9fcfe818be71f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c
@@ -0,0 +1,1015 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
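+// Editorial note (not emitted by update_cc_test_checks.py): the trailing
+// policy operand in the IR calls encodes the tail/mask policy of each test
+// suffix: _tum lowers to `i64 2`, _tumu to `i64 0`, and _mu to `i64 1`.
+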
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
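For readers skimming the generated checks, here is a minimal usage sketch
(not part of the patch) showing how one of the intrinsics exercised above
would be called from user code. It assumes a compiler built with this change
and the experimental Zvfbfa extension enabled (e.g. via
-menable-experimental-extensions); the function name `widen_mul_rne` is
illustrative only.

#include <riscv_vector.h>

// Masked bf16 -> f32 widening multiply with an explicit rounding mode and
// the tail-undisturbed/mask-undisturbed (tumu) policy: active elements get
// a[i] * b[i] widened to f32 and rounded to nearest-even; inactive and tail
// elements keep the value already in vd.
vfloat32m2_t widen_mul_rne(vbool16_t vm, vfloat32m2_t vd, vbfloat16m1_t a,
                           vbfloat16m1_t b, size_t vl) {
  return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, a, b,
                                                __RISCV_FRM_RNE, vl);
}

The corresponding CHECK lines above show this lowering to
@llvm.riscv.vfwmul.mask with frm operand `i64 0` and policy operand `i64 0`.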
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..73cc82219b201
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
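+// Editorial note: these tests cover the widening negated multiply-accumulate,
+// vd[i] = -(vs1[i] * vs2[i]) - vd[i], with the bf16 sources widened to f32
+// before the fused computation.
+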
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
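+// Editorial note: the _tu/_tum/_tumu/_mu tests above leave the rounding mode
+// dynamic (frm operand `i64 7`); the _rm_ variants below take an explicit
+// rounding-mode argument, pinned here to __RISCV_FRM_RNE, which lowers to
+// frm operand `i64 0`.
+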
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..6133230e5cd84
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl);
+}
+
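+// The `_rm` variants below take an explicit rounding-mode argument. Passing
+// __RISCV_FRM_RNE (round to nearest even, encoding 0) shows up as the
+// `i64 0` frm operand in the calls, in contrast to the `i64 7` (dynamic
+// rounding) used by the non-`_rm` variants above.
+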
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
new file mode 100644
index 0000000000000..9d9b0b0aa2f3c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
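+// vfwsub is exercised in four operand forms: `vv` (bf16 vector - bf16
+// vector), `vf` (bf16 vector - bf16 scalar), `wv` (f32 vector - bf16
+// vector), and `wf` (f32 vector - bf16 scalar), each across the
+// tu/tum/tumu/mu policy suffixes. Unmasked `_tu` calls pass vd as the
+// merge operand with frm `i64 7` (dynamic rounding); masked variants add a
+// mask operand and a trailing policy immediate, e.g. `i64 2`
+// (tail undisturbed, mask agnostic) in the `_tum` checks.
+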
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl);
+}
+
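+// Note: the _tumu (tail-undisturbed, mask-undisturbed) tests below expect a
+// trailing policy operand of 0.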
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
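+// Note: the _mu (mask-undisturbed, tail-agnostic) tests below expect a
+// trailing policy operand of 1.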
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl);
+}
+
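+// Note: the _rm_tu tests below pass an explicit rounding mode; the
+// __RISCV_FRM_RNE argument lowers to a constant frm operand of 0 in the
+// intrinsic call rather than 7 (dynamic).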
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+ vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+ vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000000000..b96aae5615db8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
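
The `_mu` (mask-undisturbed) policy exercised in this file writes the comparison result only where `mask` is set; inactive bits keep the value of `maskedoff`. A sketch against the signatures above (hypothetical helper name, Zvfbfa assumed enabled):

#include <riscv_vector.h>

// Sketch: masked bf16 equality compare, mask-undisturbed policy.
// Bits where `mask` is 0 are carried over from `prev` unchanged.
vbool64_t masked_eq(vbool64_t mask, vbool64_t prev,
                    vbfloat16mf4_t a, vbfloat16mf4_t b, size_t vl) {
  return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, prev, a, b, vl);
}
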
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
new file mode 100644
index 0000000000000..47d0427abf99b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
new file mode 100644
index 0000000000000..0a0ead22361e9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
new file mode 100644
index 0000000000000..27ddefec8c9c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
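
Unlike the element-wise `_vv` forms, the `_vf` forms in these comparison files test each element of the vector operand against a single `__bf16` scalar. A sketch using the vmfle signature above (hypothetical helper name, Zvfbfa assumed enabled):

#include <riscv_vector.h>

// Sketch: mark elements of `v` that are <= a bf16 threshold, keeping
// inactive mask bits from `prev` (mask-undisturbed policy).
vbool64_t le_threshold(vbool64_t mask, vbool64_t prev,
                       vbfloat16mf4_t v, __bf16 limit, size_t vl) {
  return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, prev, v, limit, vl);
}
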
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
new file mode 100644
index 0000000000000..d5f4f777580d3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
new file mode 100644
index 0000000000000..c2df9474acc72
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+}
+
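For reference, a minimal usage sketch of the mask-undisturbed (_mu) bf16 compares exercised above, assuming a compiler carrying this patch with the experimental zvfbfa feature enabled as in the RUN lines; the helper name is illustrative only, not part of the API:

#include <riscv_vector.h>

// Build a lane mask of a != b; lanes where `active` is clear keep their
// previous value from `prev` (mask-undisturbed policy), matching the
// llvm.riscv.vmfne.mask lowering checked above.
static vbool16_t bf16_mismatch_mu(vbool16_t active, vbool16_t prev,
                                  vbfloat16m1_t a, vbfloat16m1_t b,
                                  size_t vl) {
  return __riscv_vmfne_vv_bf16m1_b16_mu(active, prev, a, b, vl);
}
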
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
new file mode 100644
index 0000000000000..2bd3b3995c6c8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
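For reference, a minimal usage sketch of the overloaded policy entry points tested above: the one __riscv_vfadd_tum spelling resolves on operand types across every bf16 LMUL and both the vv and vf forms. Again this assumes a compiler carrying this patch with experimental zvfbfa enabled as in the RUN lines; the helper name is illustrative only:

#include <riscv_vector.h>

// Masked vector-scalar add; tail lanes and inactive lanes keep `dest`
// (tail-undisturbed, mask-undisturbed), i.e. policy operand 2 in the
// llvm.riscv.vfadd.mask calls checked above.
static vbfloat16m2_t bf16_add_vf_tum(vbool8_t mask, vbfloat16m2_t dest,
                                     vbfloat16m2_t a, __bf16 b, size_t vl) {
  return __riscv_vfadd_tum(mask, dest, a, b, vl);
}
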
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
new file mode 100644
index 0000000000000..e2a993aca7069
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                             vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                             vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                              vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                              vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                           vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                           vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                           vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                           vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
new file mode 100644
index 0000000000000..eb7427107c942
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
new file mode 100644
index 0000000000000..68d490d04ff86
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl);
+}
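+
+// Editorial note, not part of the autogenerated assertions: vfmadd is a
+// rounding-mode-aware intrinsic, so the masked IR calls in the CHECK lines
+// above carry an extra i64 frm operand; the value 7 selects dynamic rounding
+// (frm = DYN, read from fcsr at run time). The final i64 operand encodes the
+// tail/mask policy of each suffix (tumu = 0, mu = 1, tum = 2).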
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
new file mode 100644
index 0000000000000..5f682e80b61b5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
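+
+// Editorial sketch, not part of the autogenerated checks: the suffixes below
+// follow the standard RVV policy-intrinsic scheme. _tu is the unmasked form
+// that keeps tail elements from the passthru (maskedoff) operand; _tum,
+// _tumu, and _mu are the masked forms, and the trailing i64 immediate in
+// their CHECK lines encodes the policy (tumu = 0, mu = 1, tum = 2). For
+// example, a caller might blend new results into `acc` only where `mask` is
+// set, leaving tail and inactive elements undisturbed:
+//
+//   vbfloat16m1_t blend_max(vbool16_t mask, vbfloat16m1_t acc,
+//                           vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
+//     return __riscv_vfmax_tumu(mask, acc, a, b, vl);
+//   }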
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmax_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmax_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmax_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmax_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
new file mode 100644
index 0000000000000..9593ad5b5a592
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+ return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
new file mode 100644
index 0000000000000..f3ef3c3614027
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmin_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmin_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmin_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmin_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
new file mode 100644
index 0000000000000..0587c57af2b06
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
+}
+
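+// The _tum variants take the mask ahead of vd; masked-off elements follow
+// the mask-agnostic policy while the tail stays undisturbed, so the policy
+// operand remains `i64 2`.
+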
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
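+// The _tumu variants keep both the tail and masked-off elements
+// undisturbed, lowering to policy operand `i64 0`.
+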
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
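+// The _mu variants keep masked-off elements undisturbed but leave the tail
+// agnostic, lowering to policy operand `i64 1`.
+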
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
new file mode 100644
index 0000000000000..2ad26f8b24e12
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
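+// Note: vfmsub computes vd = (vd * vs1) - vs2 (the multiplicand comes from
+// the destination register), in contrast to vfmsac above. The .vf form
+// splats the scalar rs1 in place of vs1, and the trailing `i64 7` rounding
+// and policy operands follow the same encoding as in the vfmsac tests.
+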
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
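(Note for reviewers skimming the autogenerated checks above: the _tu/_tum/_tumu/_mu suffixes select the tail/mask policy, encoded as the trailing policy immediate in the IR — 0 for tumu, 1 for mu, 2 for tum. A minimal usage sketch of the tumu form, assuming a compiler with this patch applied and the experimental Zvfbfa feature enabled as in the RUN lines; the wrapper name scale_sub is hypothetical:

#include <riscv_vector.h>

// vfmsub computes vd[i] = vd[i] * vs1[i] - vs2[i] for active elements.
// Under the tumu policy, masked-off elements and tail elements both
// keep vd's old values.
vbfloat16m1_t scale_sub(vbool16_t mask, vbfloat16m1_t vd,
                        vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
  return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl);
}
)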
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
new file mode 100644
index 0000000000000..d1e726a9d63ea
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmul_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmul_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmul_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmul_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
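(Note: the vfmul tests above show the lowering pattern for policy variants — _tu calls the unmasked intrinsic with maskedoff as the merge operand, while _tumu/_mu/_tum call the masked intrinsic with policy immediates 0/1/2. A hedged sketch of the simplest case; the wrapper name mul_scalar_tu is hypothetical:

#include <riscv_vector.h>

// Tail-undisturbed vector-scalar multiply: active elements become
// op1[i] * op2, and elements past vl keep maskedoff's values.
vbfloat16mf2_t mul_scalar_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1,
                             __bf16 op2, size_t vl) {
  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
}
)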
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
new file mode 100644
index 0000000000000..9fd1ffce446bb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_s_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_s_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_s_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_s_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_s_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.s.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
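For context on the two vfmv shapes exercised above: __riscv_vfmv_v_tu splats the scalar across elements [0, vl), while __riscv_vfmv_s_tu writes element 0 only; under the tail-undisturbed policy every other element keeps the maskedoff operand. A hedged sketch (hypothetical function name; same experimental Zvfbfa setup assumed):

#include <riscv_vector.h>

// Elements [0, vl) become `src`; elements at index vl and beyond keep the
// corresponding element of `maskedoff` (tail-undisturbed policy).
vbfloat16m1_t splat_prefix(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
  return __riscv_vfmv_v_tu(maskedoff, src, vl);
}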
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
new file mode 100644
index 0000000000000..c6cd0a55fa530
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
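+// Note: the _rm_ tests above pass an explicit rounding mode, so the
+// intrinsic carries a constant frm operand (i64 0 == __RISCV_FRM_RNE),
+// whereas the earlier tests encode i64 7, i.e. the dynamic rounding mode
+// read from the frm CSR.
+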
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_f_f_w_bf16mf4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_f_f_w_bf16mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_f_f_w_bf16m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..0745633042d44
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm,
+ vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm,
+ vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..b906c5f411064
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
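+
+// The _tu/_tum/_tumu/_mu suffix on each intrinsic under test selects the
+// tail/mask policy, and it surfaces in the CHECK lines as the trailing i64
+// immediate on the llvm.riscv.* call: _tum lowers to 2 (tail undisturbed,
+// mask agnostic), _tumu to 0 (both undisturbed), and _mu to 1 (tail
+// agnostic, mask undisturbed). _tu selects the unmasked intrinsic, whose
+// first operand vd carries the tail-undisturbed merge value.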
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm,
+ vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm,
+ vuint8mf8_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm,
+ vuint8mf4_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..cc487b49429dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
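+
+// Besides vl, each vfnmacc intrinsic call in the CHECK lines carries two
+// extra i64 immediates: the rounding mode (7, the dynamic frm encoding) and
+// the tail/mask policy (2 for _tu and _tum, 0 for _tumu, 1 for _mu),
+// mirroring the policy suffix of the C intrinsic under test.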
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmacc_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmacc_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
new file mode 100644
index 0000000000000..f9c348b3dbb0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmadd_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmadd_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+}
+
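For readers skimming the autogenerated checks above, here is a minimal usage sketch (illustrative only, not part of the patch) of the overloaded bf16 vfnmadd policy intrinsic this file tests. It mirrors the test signatures exactly and assumes a toolchain with the experimental Zvfbfa extension enabled (e.g. via -menable-experimental-extensions and an appropriate -march string); the function name is hypothetical.

#include <riscv_vector.h>

// Hypothetical wrapper: computes vd[i] = -(vd[i] * vs1[i]) - vs2[i] for
// active elements. Under the mask-undisturbed (_mu) policy, elements with
// mask[i] == 0 keep their old value from vd.
vbfloat16m1_t negated_fma_masked(vbool16_t mask, vbfloat16m1_t vd,
                                 vbfloat16m1_t vs1, vbfloat16m1_t vs2,
                                 size_t vl) {
  return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
}
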
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
new file mode 100644
index 0000000000000..83d35e81403ce
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsac_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsac_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsac_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+}
+
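A note on the trailing i64 operand in the checks above: it encodes the tail/mask policy passed to the llvm.riscv.vfnmsac intrinsic (bit 0 set = tail agnostic, bit 1 set = mask agnostic), which is why the _tu and _tum tests lower to 2, _mu to 1, and _tumu to 0. A hedged sketch contrasting two of these policies, using the same intrinsic calls exercised by the tests (function names are hypothetical):

#include <riscv_vector.h>

// _tumu: both masked-off elements and tail elements keep their old
// values from vd (policy operand 0 in the generated IR).
vbfloat16m1_t keep_everything(vbool16_t m, vbfloat16m1_t vd,
                              vbfloat16m1_t vs1, vbfloat16m1_t vs2,
                              size_t vl) {
  return __riscv_vfnmsac_tumu(m, vd, vs1, vs2, vl);
}

// _mu: masked-off elements keep their old values, but the tail is
// agnostic (policy operand 1 in the generated IR).
vbfloat16m1_t keep_inactive_only(vbool16_t m, vbfloat16m1_t vd,
                                 vbfloat16m1_t vs1, vbfloat16m1_t vs2,
                                 size_t vl) {
  return __riscv_vfnmsac_mu(m, vd, vs1, vs2, vl);
}
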
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
new file mode 100644
index 0000000000000..f5282a195131d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfnmsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+}
+
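A note on what the masked checks above encode: the trailing immediate on each llvm.riscv.vfnmsub.mask call is the tail/mask policy (2 for the _tum variants, 0 for _tumu, 1 for _mu), and the i64 7 ahead of the VL operand selects the dynamic rounding mode (FRM_DYN). The sketch below is illustrative only — the function name and the way the three results are combined are not from this patch — but it contrasts the three policy variants these tests exercise, using the same overloaded intrinsic signatures:

#include <riscv_vector.h>

// Illustrative only: contrasts the three masked policy variants checked
// above. Inactive and tail elements come from vd except where the suffix
// marks them agnostic.
vbfloat16m1_t fused_nmsub_policies(vbool16_t mask, vbfloat16m1_t vd,
                                   __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
  // tum: tail undisturbed, masked-off agnostic (policy operand 2 in the IR)
  vbfloat16m1_t a = __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
  // tumu: tail undisturbed, masked-off undisturbed (policy operand 0)
  vbfloat16m1_t b = __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
  // mu: tail agnostic, masked-off undisturbed (policy operand 1)
  vbfloat16m1_t c = __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
  // Combine with the overloaded bf16 vfadd, also added by this patch.
  return __riscv_vfadd(__riscv_vfadd(a, b, vl), c, vl);
}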
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
new file mode 100644
index 0000000000000..f8e5a339c87d7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrec7_mu(mask, maskedoff, op1, vl);
+}
+
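Unlike the masked forms, the _tu checks above lower to the unmasked llvm.riscv.vfrec7 intrinsic, which still carries the frm operand (i64 7, dynamic rounding) but no policy immediate — the tail-undisturbed behavior comes from passing maskedoff as the merge operand. A minimal usage sketch, with an illustrative function name:

#include <riscv_vector.h>

// Illustrative sketch: 7-bit reciprocal estimate with the tail-undisturbed
// policy, so elements past vl keep their previous values from dest.
vbfloat16m1_t recip_estimate_tu(vbfloat16m1_t dest, vbfloat16m1_t x,
                                size_t vl) {
  // Lowers to llvm.riscv.vfrec7 with frm = 7 (dynamic) and no policy
  // operand, matching the _tu checks above.
  return __riscv_vfrec7_tu(dest, x, vl);
}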
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..7c6c926e50c10
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tu(maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) {
+ return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
new file mode 100644
index 0000000000000..c09caeb8207af
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
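+// Note on the autogenerated pattern below: the unmasked _tu intrinsic lowers
+// to @llvm.riscv.vfrsub.<type> with operands (maskedoff, op1, op2, frm, vl),
+// where the constant i64 7 selects dynamic rounding (FRM_DYN). The masked
+// _tum/_tumu/_mu variants lower to the .mask intrinsic, which appends the
+// mask and a trailing policy operand; the CHECK lines show tumu -> 0,
+// mu -> 1, tum -> 2, consistent with LLVM's vector-policy encoding
+// (bit 0 = tail agnostic, bit 1 = mask agnostic).
+//
+// A minimal caller sketch (hypothetical; dest, v, and vl are assumed):
+//   vbfloat16m1_t r = __riscv_vfrsub_tu(dest, v, (__bf16)1.0f, vl);
+// Elements past vl keep the values of dest (tail undisturbed).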
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tu(maskedoff, op1, op2, vl);
+}
+
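+// _tum group: masked .mask intrinsic with policy operand i64 2
+// (tail undisturbed, mask agnostic).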
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
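+// _tumu group: policy operand i64 0 (tail undisturbed, mask undisturbed).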
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
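+// _mu group: policy operand i64 1 (tail agnostic, mask undisturbed).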
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
new file mode 100644
index 0000000000000..c1f69932f6a02
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
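+// vfsgnj copies sign bits rather than rounding a result, so unlike vfrsub
+// these calls carry no frm operand: the unmasked intrinsic takes
+// (maskedoff, op1, op2, vl) and the .mask form appends the mask and the
+// same trailing policy operand (tumu -> 0, mu -> 1, tum -> 2). Each LMUL
+// is exercised in both vector-vector (_vv) and vector-scalar (_vf) forms.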
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+}
+
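+// _tum group: policy operand i64 2 (tail undisturbed, mask agnostic).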
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl);
+}
+
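+// _tumu group: policy operand i64 0 (tail undisturbed, mask undisturbed).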
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl);
+}
+
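+// _mu group: policy operand i64 1 (tail agnostic, mask undisturbed).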
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnj_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnj_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl);
+}
+
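A note on the policy suffixes exercised by these tests: the suffix selects the trailing i64 policy operand of the masked LLVM intrinsic visible in the CHECK lines. _tum lowers with policy 2 (tail undisturbed, mask agnostic), _tumu with policy 0 (tail and mask undisturbed), and _mu with policy 1 (tail agnostic, mask undisturbed), while _tu picks the unmasked intrinsic and threads maskedoff through as its first operand. A minimal C sketch of the four call shapes for one element type, assuming a Zvfbfa-enabled toolchain; the helper name is illustrative, the intrinsic names and signatures are the ones tested above:

#include <riscv_vector.h>

// Illustrative helper (not part of the patch): one call per policy variant.
static vbfloat16m1_t sgnj_policies(vbool16_t mask, vbfloat16m1_t maskedoff,
                                   vbfloat16m1_t op1, vbfloat16m1_t op2,
                                   size_t vl) {
  // Unmasked, tail undisturbed: maskedoff becomes the first operand.
  vbfloat16m1_t r = __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
  r = __riscv_vfsgnj_tum(mask, r, op1, op2, vl);   // masked, policy operand 2
  r = __riscv_vfsgnj_tumu(mask, r, op1, op2, vl);  // masked, policy operand 0
  return __riscv_vfsgnj_mu(mask, r, op1, op2, vl); // masked, policy operand 1
}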
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
new file mode 100644
index 0000000000000..1b799d87d8131
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjn_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjn_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjn_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjn_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
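Likewise, each op is covered in a vector-vector (vv) and a vector-scalar (vf) form. With the overloaded API the same name serves both; the type of the second source operand (vector vs. __bf16 scalar) selects between the .nxvNbf16.nxvNbf16 and .nxvNbf16.bf16 LLVM intrinsics seen in the CHECK lines. A minimal sketch under the same toolchain assumption; the helper name is illustrative:

#include <riscv_vector.h>

// Illustrative helper (not part of the patch): vv vs. vf resolved by operand type.
static vbfloat16mf4_t sgnjn_forms(vbfloat16mf4_t maskedoff, vbfloat16mf4_t v,
                                  __bf16 s, size_t vl) {
  // vv form: vector second operand -> ...nxv1bf16.nxv1bf16 intrinsic.
  vbfloat16mf4_t vv = __riscv_vfsgnjn_tu(maskedoff, v, v, vl);
  // vf form: __bf16 second operand -> ...nxv1bf16.bf16 intrinsic.
  return __riscv_vfsgnjn_tu(vv, v, s, vl);
}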
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..9c5f2af3d6e8f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..691302e245427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..1238d2204c6d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+}
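+
+// Note (illustrative comment, not part of the autogenerated checks): the
+// trailing i64 operand in the masked intrinsic calls above is the vta/vma
+// policy, encoded as bit 0 = tail agnostic, bit 1 = mask agnostic. That is
+// the mapping exercised by these tests: _tum -> 2, _tumu -> 0, _mu -> 1.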
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
new file mode 100644
index 0000000000000..ea4f8f043d6bb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
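+
+// Note (illustrative comment, not part of the autogenerated checks): the
+// i64 7 operand in the calls below selects the dynamic rounding mode (frm).
+// A minimal usage sketch of the tail-undisturbed form, using hypothetical
+// values dst/a/b/n -- elements past n are kept from dst rather than being
+// left agnostic:
+//   vbfloat16m1_t r = __riscv_vfsub_tu(dst, a, b, n);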
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
new file mode 100644
index 0000000000000..e5b7b8da1f3cc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
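+
+// Note (a summary for orientation, not part of the autogenerated checks):
+// the four widening-add forms tested below differ only in operand kinds,
+// each producing an f32 vector of twice the source element width:
+//   vv: bf16 vector + bf16 vector
+//   vf: bf16 vector + bf16 scalar
+//   wv: f32 vector (already wide) + bf16 vector
+//   wf: f32 vector (already wide) + bf16 scalar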
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl);
+}
+
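+// The _tum tests below exercise the masked, tail-undisturbed forms: the mask
+// operand (vm) comes first, and each call lowers to the corresponding .mask
+// intrinsic with a trailing policy immediate of 2 (tail undisturbed, mask
+// agnostic).
+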
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
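+// The _tumu tests keep both the tail and the masked-off elements undisturbed,
+// which shows up as a policy immediate of 0 on the .mask intrinsics below.
+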
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
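+// The _mu tests are mask-undisturbed but tail-agnostic, i.e. a policy
+// immediate of 1 on the .mask intrinsics below.
+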
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
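+// From here on, the _rm variants pass an explicit rounding mode as an extra
+// argument; __RISCV_FRM_RNE lowers to a frm immediate of 0, whereas the tests
+// above default to 7 (dynamic rounding mode).
+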
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
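+// Note: the trailing i64 operand of the masked calls is the policy
+// encoding seen in the checks: tumu lowers to 0, mu (tail agnostic) to 1,
+// and tum (mask agnostic) to 2; the unmasked _tu calls above carry no
+// policy operand.
+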
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwadd_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwadd_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
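
As context for the CHECK lines above: the masked rounding-mode variants lower to the llvm.riscv.vfwadd*.mask intrinsics with three trailing scalar operands, in order the rounding mode (0 for __RISCV_FRM_RNE, 7 for dynamic), the AVL, and the policy immediate, where bit 0 set means tail-agnostic and bit 1 set means mask-agnostic, so the "_mu" suffix (tail-agnostic, mask-undisturbed) lowers to 1. A minimal usage sketch against the intrinsics exercised in this file; the wrapper name widen_acc_add is illustrative, not part of the API:

    #include <riscv_vector.h>

    // Masked widening add of two bf16 vectors into an f32 result with an
    // explicit round-to-nearest-even rounding mode. Inactive lanes keep the
    // corresponding value of vd (mask-undisturbed, "_mu" policy).
    vfloat32m1_t widen_acc_add(vbool32_t vm, vfloat32m1_t vd,
                               vbfloat16mf2_t vs2, vbfloat16mf2_t vs1,
                               size_t vl) {
      return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
    }
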
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
new file mode 100644
index 0000000000000..730010421b944
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c
@@ -0,0 +1,765 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vuint8mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vuint8m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vuint8m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ vuint8m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_f_v_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_f_v_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
+}
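
The trailing immediate on the masked llvm.riscv.vfwcvt.*.mask calls above is the same policy operand: "_tum" lowers to 2 (mask-agnostic), "_mu" to 1 (tail-agnostic), and "_tumu" to 0, while the "_tu" forms use the unmasked intrinsic with vd acting as the tail passthru. A minimal sketch of two of the conversion shapes this file covers, using only calls shown in the tests; the wrapper names are illustrative:

    #include <riscv_vector.h>

    // i8 -> bf16 widening integer-to-float conversion, tail-undisturbed:
    // tail lanes beyond vl keep the corresponding value of vd.
    vbfloat16m1_t cvt_i8_to_bf16(vbfloat16m1_t vd, vint8mf2_t vs2, size_t vl) {
      return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl);
    }

    // bf16 -> f32 widening float-to-float conversion, masked,
    // mask-undisturbed: inactive lanes keep the corresponding value of vd.
    vfloat32m2_t cvt_bf16_to_f32(vbool16_t vm, vfloat32m2_t vd,
                                 vbfloat16m1_t vs2, size_t vl) {
      return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
    }
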
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
new file mode 100644
index 0000000000000..b05f88028874d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
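+
+// A brief orientation for readers of this autogenerated file (a hedged
+// summary, not emitted by update_cc_test_checks.py): each test exercises one
+// (LMUL, policy) combination of the bf16 vfwmacc overloads. In the checked
+// IR, the trailing scalar operands of the llvm.riscv.vfwmacc intrinsics are
+// the rounding mode (7 = dynamic, i.e. read the frm CSR; 0 = RNE, matching
+// __RISCV_FRM_RNE below), then vl, then the tail/mask policy. The policy
+// values here follow LLVM's usual encoding (bit 0 = tail agnostic, bit 1 =
+// mask agnostic), so _tu and _tum lower to 2, _tumu to 0, and _mu to 1.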
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
+}
+
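+// The masked variants below select the .mask intrinsic flavor: the vm
+// operand is passed through, and the policy operand varies per suffix as
+// described in the note after the include above.
+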
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
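+// From here on, the _rm variants take an explicit rounding-mode argument;
+// passing __RISCV_FRM_RNE shows up in the IR as a rounding-mode operand of
+// 0 in place of the dynamic value 7 used by the tests above.
+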
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
new file mode 100644
index 0000000000000..93721f6a889d9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c
@@ -0,0 +1,977 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
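+// Note on the autogenerated checks below: the last three i64 operands of each
+// @llvm.riscv.vfwmsac intrinsic call encode, in order, the rounding mode
+// (7 = dynamic frm; 0 = __RISCV_FRM_RNE), the requested vector length [[VL]],
+// and the tail/mask policy (tu and tum = 2, mu = 1, tumu = 0).
+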
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
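+// The _rm variants below pass an explicit rounding mode; __RISCV_FRM_RNE
+// lowers to an frm operand of 0 in place of the dynamic value 7 used above.
+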
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
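
The tests above enumerate every type/policy/rounding-mode combination mechanically; as a minimal caller-side sketch (the wrapper name widen_msac_rne is ours for illustration, not part of the patch), the masked _mu form with an explicit rounding mode reduces to a single intrinsic call:

#include <riscv_vector.h>

// Widening bf16 multiply-subtract-accumulate under the mask-undisturbed (_mu)
// policy, with round-to-nearest-even selected explicitly via __RISCV_FRM_RNE.
// This mirrors test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu above and compiles the
// same way (requires -menable-experimental-extensions with zvfbfa).
vfloat32mf2_t widen_msac_rne(vbool64_t vm, vfloat32mf2_t vd,
                             vbfloat16mf4_t vs1, vbfloat16mf4_t vs2,
                             size_t vl) {
  return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
}
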
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
new file mode 100644
index 0000000000000..4a2b5e39ef2cf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c
@@ -0,0 +1,975 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..57e433441cd40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
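+// In the CHECK lines below, the trailing i64 operands of each
+// @llvm.riscv.vfwnmacc* call are, in order, the rounding mode (7 selects the
+// dynamic frm CSR; 0 is RNE, emitted when the test passes __RISCV_FRM_RNE),
+// the requested vector length, and the tail/mask policy (_tu and _tum lower
+// to 2, _tumu to 0, and _mu to 1 in this file).
+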
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..42da060126a2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm,
+ vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ __bf16 vs1, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ __bf16 vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm,
+ vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm,
+ vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1,
+ vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1,
+ vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(
+ vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ __bf16 vs1,
+ vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
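
For readers skimming the generated checks, here is a minimal usage sketch (illustrative only, not part of the patch) of the masked, explicit-rounding-mode overload exercised in the tests above. It assumes a target with +v and +experimental-zvfbfa and the overloaded Zvfbfa declarations this patch adds to <riscv_vector.h>:

// Illustrative sketch, not patch content. Widening negate-multiply-sub
// accumulate on bf16 inputs with an f32 accumulator:
//   vd[i] = -(vs1[i] * vs2[i]) + vd[i]
// using round-to-nearest-even and the tail-undisturbed/mask-undisturbed
// (tumu) policy, matching test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu above.
#include <riscv_vector.h>

vfloat32m1_t fwnmsac_bf16_rne(vbool32_t vm, vfloat32m1_t acc,
                              vbfloat16mf2_t a, vbfloat16mf2_t b,
                              size_t vl) {
  return __riscv_vfwnmsac_tumu(vm, acc, a, b, __RISCV_FRM_RNE, vl);
}
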
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
new file mode 100644
index 0000000000000..1378bc963b216
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c
@@ -0,0 +1,1932 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
+}
+
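+// _tum variants below are the tail-undisturbed, mask-agnostic forms: the
+// masked intrinsics take the mask as an extra operand and (as far as the
+// standard policy encoding goes) carry it in the trailing immediate
+// (i64 2 == TU/MA), while the preceding i64 7 remains the dynamic rounding
+// mode.
+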
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
+}
+
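+// _tumu variants: tail-undisturbed, mask-undisturbed, reflected in the
+// trailing policy immediate switching to i64 0.
+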
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
+}
+
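+// _mu variants: tail-agnostic, mask-undisturbed, reflected in the trailing
+// policy immediate switching to i64 1.
+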
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, vbfloat16m1_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, vbfloat16m2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, vbfloat16m4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
+}
+
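+// _rm variants pass an explicit rounding mode: __RISCV_FRM_RNE lowers to the
+// frm immediate i64 0 in place of the dynamic mode (i64 7) used above.
+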
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tu(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm,
+ vfloat32mf2_t vd,
+ vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1,
+ size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], bfloat [[RS1]], <vscale x 1 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd,
+ vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vbfloat16mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wv_bf16mf2_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_wf_bf16_f32m1_rm_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], bfloat [[RS1]], <vscale x 2 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd,
+ vfloat32m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_vf_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vbfloat16m1_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wv_bf16m1_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2,
+ vbfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwsub_wf_bf16_f32m2_rm_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], bfloat [[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+ vfloat32m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwsub_wf_bf16_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+ vfloat32m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+ vfloat32m8_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
new file mode 100644
index 0000000000000..3945f826809b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
new file mode 100644
index 0000000000000..82586da09b325
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfge_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfge_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
new file mode 100644
index 0000000000000..75ccbbc1e8a1e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfgt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfgt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfgt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfgt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfgt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfgt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
new file mode 100644
index 0000000000000..49ff1c9812d62
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfle_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfle_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfle_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfle_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfle_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfle_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
new file mode 100644
index 0000000000000..24b3f9c16d943
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmflt_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmflt_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmflt_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmflt_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
new file mode 100644
index 0000000000000..ca3e134910dfb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64(<vscale x 1 x i1> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i1> @test_vmfne_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64(<vscale x 2 x i1> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i1> @test_vmfne_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64(<vscale x 4 x i1> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i1> @test_vmfne_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmfne_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl);
+}
+
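As a usage note, not part of the patch itself: the `_mu` (mask-undisturbed) suffix exercised by these tests means lanes where `mask` is clear keep their bit from `maskedoff` instead of being recomputed. A minimal sketch of how the overloaded form might be called, assuming a toolchain with this patch applied and the Zvfbfa extension enabled (the helper name `update_ne_mask` is hypothetical, but the intrinsic call and types are exactly those in the tests above):

#include <riscv_vector.h>
#include <stddef.h>

// Merge a fresh bf16 inequality test into an existing result mask, updating
// only lanes where `active` is set; inactive lanes keep their bit from `prev`
// because of the mask-undisturbed (mu) policy.
vbool16_t update_ne_mask(vbool16_t active, vbool16_t prev,
                         vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
  return __riscv_vmfne_mu(active, prev, op1, op2, vl);
}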