[llvm-branch-commits] [llvm] ad0a7ad - [RISCV] Add intrinsics for vf[n]macc/vf[n]msac/vf[n]madd/vf[n]msub instructions
ShihPo Hung via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 22 18:38:57 PST 2020
Author: ShihPo Hung
Date: 2020-12-22T18:34:00-08:00
New Revision: ad0a7ad950fec5e422e84f0d3f8942d5e1b116f6
URL: https://github.com/llvm/llvm-project/commit/ad0a7ad950fec5e422e84f0d3f8942d5e1b116f6
DIFF: https://github.com/llvm/llvm-project/commit/ad0a7ad950fec5e422e84f0d3f8942d5e1b116f6.diff
LOG: [RISCV] Add intrinsics for vf[n]macc/vf[n]msac/vf[n]madd/vf[n]msub instructions
This patch defines the vfmacc/vfnmacc, vfmsac/vfnmsac, vfmadd/vfnmadd,
and vfmsub/vfnmsub intrinsics and lowers them to V instructions.
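
For reference, these intrinsics follow the usual RVV ternary shape:
accumulator first, then the multiplicand operands, then the VL operand.
A minimal IR sketch of the unmasked vector-vector form, mirroring the
tests added below (the function name @example_vfmacc is illustrative
only; on riscv32 the VL operand is i32):

  declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
    <vscale x 1 x half>,
    <vscale x 1 x half>,
    <vscale x 1 x half>,
    i32);

  define <vscale x 1 x half> @example_vfmacc(<vscale x 1 x half> %acc, <vscale x 1 x half> %x, <vscale x 1 x half> %y, i32 %vl) nounwind {
  entry:
    ; Computes acc + x * y elementwise for %vl elements; selects vfmacc.vv.
    %r = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
      <vscale x 1 x half> %acc,
      <vscale x 1 x half> %x,
      <vscale x 1 x half> %y,
      i32 %vl)
    ret <vscale x 1 x half> %r
  }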
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: ShihPo Hung <shihpo.hung at sifive.com>
Differential Revision: https://reviews.llvm.org/D93691
Added:
llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index ba0929b16ea5..dc080db5b30f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -551,6 +551,15 @@ let TargetPrefix = "riscv" in {
defm vfwmul : RISCVBinaryABX;
+ defm vfmacc : RISCVTernaryAAXA;
+ defm vfnmacc : RISCVTernaryAAXA;
+ defm vfmsac : RISCVTernaryAAXA;
+ defm vfnmsac : RISCVTernaryAAXA;
+ defm vfmadd : RISCVTernaryAAXA;
+ defm vfnmadd : RISCVTernaryAAXA;
+ defm vfmsub : RISCVTernaryAAXA;
+ defm vfnmsub : RISCVTernaryAAXA;
+
defm vfsgnj : RISCVBinaryAAX;
defm vfsgnjn : RISCVBinaryAAX;
defm vfsgnjx : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index f3b6d2f5867f..5c858b0e0982 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -885,9 +885,10 @@ multiclass VPseudoTernaryV_VX<string Constraint = ""> {
defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
-multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
+multiclass VPseudoTernaryV_VX_AAXA<bit IsFloat, string Constraint = ""> {
foreach m = MxList.m in
- defm _VX : VPseudoTernary<m.vrclass, GPR, m.vrclass, m, Constraint>;
+ defm !if(IsFloat, "_VF", "_VX") : VPseudoTernary<m.vrclass,
+ !if(IsFloat, FPR32, GPR), m.vrclass, m, Constraint>;
}
multiclass VPseudoTernaryW_VV {
@@ -907,9 +908,9 @@ multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
-multiclass VPseudoTernaryV_VV_VX_AAXA<string Constraint = ""> {
+multiclass VPseudoTernaryV_VV_VX_AAXA<bit IsFloat = 0, string Constraint = ""> {
defm "" : VPseudoTernaryV_VV<Constraint>;
- defm "" : VPseudoTernaryV_VX_AAXA<Constraint>;
+ defm "" : VPseudoTernaryV_VX_AAXA<IsFloat, Constraint>;
}
multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
@@ -1593,7 +1594,8 @@ multiclass VPatTernaryV_VX<string intrinsic, string instruction,
multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
- defm : VPatTernary<intrinsic, instruction, "VX",
+ defm : VPatTernary<intrinsic, instruction,
+ !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
vti.Vector, vti.Scalar, vti.Vector, vti.Mask,
vti.SEW, vti.LMul, vti.RegClass,
vti.ScalarRegClass, vti.RegClass>;
@@ -1939,6 +1941,18 @@ defm PseudoVFRDIV : VPseudoBinaryV_VX</*IsFloat=*/1>;
//===----------------------------------------------------------------------===//
defm PseudoVFWMUL : VPseudoBinaryW_VV_VX</*IsFloat=*/1>;
+//===----------------------------------------------------------------------===//
+// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFMACC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFNMACC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFMSAC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFNMSAC : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFMADD : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFNMADD : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFMSUB : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+defm PseudoVFNMSUB : VPseudoTernaryV_VV_VX_AAXA</*IsFloat*/true>;
+
//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
@@ -2320,6 +2334,18 @@ defm "" : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>;
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwmul", "PseudoVFWMUL", AllWidenableFloatVectors>;
+//===----------------------------------------------------------------------===//
+// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmacc", "PseudoVFMACC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmacc", "PseudoVFNMACC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsac", "PseudoVFMSAC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsac", "PseudoVFNMSAC", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmadd", "PseudoVFMADD", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmadd", "PseudoVFNMADD", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfmsub", "PseudoVFMSUB", AllFloatVectors>;
+defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vfnmsub", "PseudoVFNMSUB", AllFloatVectors>;
+
//===----------------------------------------------------------------------===//
// 14.12. Vector Floating-Point Sign-Injection Instructions
//===----------------------------------------------------------------------===//
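
A note on the multiclass changes above: when the scalar operand type is
floating point, the pseudos are now emitted with a "_VF" suffix taking
FPR32 rather than the "_VX"/GPR form, so the scalar variants of these
intrinsics select the .vf instructions. A minimal sketch of that scalar
form, again mirroring the tests below (@example_vfmacc_vf is
illustrative only):

  declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
    <vscale x 1 x half>,
    half,
    <vscale x 1 x half>,
    i32);

  define <vscale x 1 x half> @example_vfmacc_vf(<vscale x 1 x half> %acc, half %s, <vscale x 1 x half> %x, i32 %vl) nounwind {
  entry:
    ; Computes acc + s * x elementwise; selects vfmacc.vf with %s in an FPR.
    %r = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
      <vscale x 1 x half> %acc,
      half %s,
      <vscale x 1 x half> %x,
      i32 %vl)
    ret <vscale x 1 x half> %r
  }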
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
new file mode 100644
index 000000000000..44f0ecb2f0d5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
new file mode 100644
index 000000000000..6b83445292a3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
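The vfmacc tests above all follow one shape: the first vector operand is the accumulator, which is also the tied destination (vfmacc computes vd[i] = vs1[i] * vs2[i] + vd[i], and the .vf form splats a scalar FP operand through ft0 first). A minimal sketch of the unmasked call pattern, reusing the intrinsic name declared above (the trailing integer is the vl operand and is XLEN-wide, hence i64 on riscv64):

declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
  <vscale x 4 x double>, double, <vscale x 4 x double>, i64)

define <vscale x 4 x double> @sketch(<vscale x 4 x double> %acc, double %s,
                                     <vscale x 4 x double> %v, i64 %vl) {
  ; %acc is both the addend input and the result; the pseudo expansion ties
  ; them, which is why every test passes the accumulator first.
  %r = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
           <vscale x 4 x double> %acc, double %s,
           <vscale x 4 x double> %v, i64 %vl)
  ret <vscale x 4 x double> %r
}

Note also the m4 vv tests: a third m4 vector argument no longer fits in the fixed vector argument registers (v16/v20), so it arrives in memory and is reloaded with vle64.v into v28 before the vfmacc.vv is issued.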
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
new file mode 100644
index 000000000000..344a21bb0895
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
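vfmadd differs from vfmacc only in which operand is overwritten: vfmadd.vv vd, vs1, vs2 computes vd[i] = vd[i] * vs1[i] + vs2[i], so the multiplicand rather than the addend is tied to the destination, while the intrinsic signature and the masked variant look the same. A minimal sketch of the masked form, mirroring the .mask declarations above (the mask lives in v0, and the generated vsetvli uses ta,mu, so masked-off lanes keep the old destination value):

declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>,
  <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @sketch_masked(<vscale x 1 x half> %acc,
                                          <vscale x 1 x half> %x,
                                          <vscale x 1 x half> %y,
                                          <vscale x 1 x i1> %m, i32 %vl) {
  ; The extra <vscale x 1 x i1> operand selects the active lanes; codegen
  ; materializes it in v0 and emits vfmadd.vv ..., v0.t.
  %r = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
           <vscale x 1 x half> %acc, <vscale x 1 x half> %x,
           <vscale x 1 x half> %y, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x half> %r
}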
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
new file mode 100644
index 000000000000..993f535777e0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
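+; The tests below switch to the vfmadd.vf form,
+; vd[i] = +(vd[i] * f[rs1]) + vs2[i], which splats a scalar multiplicand.
+; With the default integer calling convention used by these tests, the scalar
+; FP argument arrives in a0 and is moved into ft0 by fmv.{h,w,d}.x before the
+; vector op.
+;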
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
new file mode 100644
index 000000000000..d86f9462c6f2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
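+; vfmsac.vv computes vd[i] = +(vs1[i] * vs2[i]) - vd[i]: the product of the
+; two source operands minus the accumulator. This RV32 file exercises only
+; f16 and f32 element types (+f without +d) and passes the explicit vector
+; length as i32.
+;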
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
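+; The rest of the file repeats the same coverage for vfmsac.vf, where the
+; vector multiplier is replaced by a splatted scalar:
+; vd[i] = +(f[rs1] * vs2[i]) - vd[i].
+;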
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
new file mode 100644
index 000000000000..03364aba8430
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
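+; RV64 counterpart of the previous file: the vector length operand is i64 and
+; +d is enabled, so f64 element types can be covered as well (hence the
+; larger hunk).
+;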
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
new file mode 100644
index 000000000000..1d8cb060e322
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
new file mode 100644
index 000000000000..347731829812
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
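+
+; These autogenerated tests cover the unmasked and masked forms of both
+; vfmsub.vv and vfmsub.vf for each supported SEW/LMUL combination. Per the
+; RISC-V V-extension spec, vfmsub.vv vd, vs1, vs2 computes
+;   vd[i] = +(vd[i] * vs1[i]) - vs2[i],
+; overwriting the multiplicand; the first intrinsic operand is tied to the
+; destination accordingly.
+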
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
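+; From LMUL=4 upward the three vector operands no longer all fit in the
+; vector argument registers, so the third operand is passed indirectly: the
+; affected tests reload it (vle16.v/vle32.v/vle64.v) under a VLMAX vsetvli
+; before switching to the requested vl for the fused operation.
+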
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
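+; The remaining tests exercise the vector-scalar (vf) form. On rv64 the
+; half/float/double scalar operand arrives in a0 and is moved into ft0 with
+; fmv.h.x, fmv.w.x, or fmv.d.x before the vfmsub.vf.
+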
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
new file mode 100644
index 000000000000..d90664c7eb37
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
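+
+; These autogenerated tests cover the unmasked and masked forms of
+; vfnmacc.vv and vfnmacc.vf. Per the RISC-V V-extension spec,
+; vfnmacc.vv vd, vs1, vs2 computes
+;   vd[i] = -(vs1[i] * vs2[i]) - vd[i],
+; the negated fused multiply-accumulate that overwrites the addend.
+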
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
new file mode 100644
index 000000000000..9e113de8e9ca
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmacc.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
new file mode 100644
index 000000000000..abea8d688fa3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
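+; The .vf tests below pass the multiplicand as a scalar FP value that is
+; splatted across the vector; per the RVV spec, vfnmadd.vf computes
+; vd[i] = -(vd[i] * f[rs1]) - vs2[i].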
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
new file mode 100644
index 000000000000..4b4b8136fe5a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
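+; These tests cover the unmasked and masked llvm.riscv.vfnmadd intrinsics for
+; each SEW/LMUL combination on riscv64. Per the RVV spec, vfnmadd.vv computes
+; vd[i] = -(vd[i] * vs1[i]) - vs2[i], overwriting the multiplicand register.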
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
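+; The f64 element types are exercised only in this rv64 file, which enables
+; +d; the rv32 files in this patch enable only +f and stop at f32.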
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
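+; In the f64 .vf tests the scalar double operand reaches the FPR via fmv.d.x,
+; an RV64-only instruction, since a double fits in a single 64-bit GPR here.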
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmadd.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
new file mode 100644
index 000000000000..7b090881a336
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
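In the m4 cases above the third vector operand is no longer passed in a
register: with LMUL=4 the first two arguments occupy v16 and v20, so the third
is apparently passed indirectly and reloaded from (a0) with a vlmax vsetvli
(a2, zero) before the fused multiply-add executes under the avl in a1. The
same pattern repeats for every m4 test in these files.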
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
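The rv64 file below repeats these tests with an i64 vl operand and adds the
f64 cases, which is why its RUN line needs +d where the rv32 file above gets
by with +f. At the IR level the only difference for the shared cases is the
type of the trailing vl argument; both declarations below are taken verbatim
from the two files:

  ; riscv32
  declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32)

  ; riscv64
  declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64)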
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
new file mode 100644
index 000000000000..23d83c29e247
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsac.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
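
For reference, every vfnmsac test above exercises the same pair of intrinsics: an unmasked form and a .mask form that additionally takes a <vscale x N x i1> mask, with inactive elements taking their value from the first operand. Per the V-extension spec, vfnmsac computes vd[i] = -(vs1[i] * vs2[i]) + vd[i], and the .vf variants substitute the scalar f[rs1] for vs1[i]; that semantics note comes from the spec, not from this patch. A minimal standalone sketch of the unmasked calling pattern (the function name is illustrative; the intrinsic signature and types match the rv64 tests above):

declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
  <vscale x 1 x float>, float, <vscale x 1 x float>, i64)

; Computes %r[i] = -(%s * %v[i]) + %acc[i] over the first %vl elements;
; the accumulator is passed first and doubles as the result base.
define <vscale x 1 x float> @vfnmsac_sketch(<vscale x 1 x float> %acc, float %s,
                                            <vscale x 1 x float> %v, i64 %vl) nounwind {
  %r = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
      <vscale x 1 x float> %acc, float %s, <vscale x 1 x float> %v, i64 %vl)
  ret <vscale x 1 x float> %r
}
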
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
new file mode 100644
index 000000000000..4cbeb71a8653
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv32.ll
@@ -0,0 +1,856 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
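
The vfnmsub tests in the file above follow the same unmasked/masked structure, but the operand roles differ from vfnmsac: per the spec, vfnmsub overwrites the multiplicand rather than the addend, i.e. vd[i] = -(vd[i] * vs1[i]) + vs2[i], with the .vf variants using the scalar f[rs1] as the other multiplicand. A standalone sketch under the same assumptions as the vfnmsac one above (illustrative function name; i32 vector length as in this rv32 file):

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
  <vscale x 1 x float>, float, <vscale x 1 x float>, i32)

; Computes %r[i] = -(%acc[i] * %s) + %v[i]: the first operand is both a
; multiplicand and the destination, and %v supplies the addend.
define <vscale x 1 x float> @vfnmsub_sketch(<vscale x 1 x float> %acc, float %s,
                                            <vscale x 1 x float> %v, i32 %vl) nounwind {
  %r = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
      <vscale x 1 x float> %acc, float %s, <vscale x 1 x float> %v, i32 %vl)
  ret <vscale x 1 x float> %r
}
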
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
new file mode 100644
index 000000000000..c25fee55ba36
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-rv64.ll
@@ -0,0 +1,1142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.h.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.w.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fmv.d.x ft0, a0
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vfnmsub.vf v16, ft0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}