[llvm] [RISCV][GISel] Support select vx, vf form rvv intrinsics (PR #157398)
Jianjian Guan via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 8 00:31:50 PDT 2025
https://github.com/jacquesguan created https://github.com/llvm/llvm-project/pull/157398
This PR is the next step after https://github.com/llvm/llvm-project/pull/156415. For the vx form, we legalize the intrinsic by widening its scalar operand to XLen; for the vf form, we select the FPR register bank for the scalar operand.
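For the vf form, the scalar operand is a floating-point value that has to land in an FPR rather than a GPR. A minimal sketch in the tests' iXLen style (sed-substituted to i32/i64 by the RUN lines); the .f16-mangled declaration follows the usual vf naming convention and is illustrative, not copied from the diffs below:

declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
entry:
  ; %1 is a half scalar: the register bank selector now assigns it to the
  ; FPR bank so the call selects to vfadd.vf instead of falling back.
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)
  ret <vscale x 1 x half> %a
}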
From f8ddd8b7bc6c22d3d8ec4d203280c517d1758809 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Tue, 2 Sep 2025 15:14:36 +0800
Subject: [PATCH 1/5] [RISCV][GISel] Add initial support for rvv intrinsics
This PR removes the fallback to SelectionDAG ISel for RVV intrinsics and marks them as legal in the legalizer pass.
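For example, the scalable_arg case that fallback.ll used to expect a fallback remark for (deleted below) now translates and selects through GlobalISel:

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64)

define <vscale x 1 x i8> @scalable_arg(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
entry:
  ; Previously reported "unable to translate instruction: call"; with this
  ; change legalizeIntrinsic finds the intrinsic in RISCVVIntrinsicsTable
  ; and marks it legal.
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)
  ret <vscale x 1 x i8> %a
}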
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 8 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 12 +
.../RISCV/GlobalISel/irtranslator/fallback.ll | 33 -
.../CodeGen/RISCV/GlobalISel/rvv/vfadd.ll | 764 ++++++++++++++++++
4 files changed, 784 insertions(+), 33 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index ab5c9e17b9a37..2d175c7bf7899 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -714,6 +714,14 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+
+ const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID);
+
+ if (II) {
+ return true;
+ }
+
switch (IntrinsicID) {
default:
return false;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3ab08f990c289..e2ea0e77ca23a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -24880,6 +24880,18 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
Op == Instruction::Freeze || Op == Instruction::Store)
return false;
+ if (Op == Instruction::Call) {
+ const CallInst &CI = cast<CallInst>(Inst);
+ const Function *F = CI.getCalledFunction();
+ Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
+
+ const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+ RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(ID);
+ // Mark RVV intrinsic is supported.
+ if (II)
+ return false;
+ }
+
if (Inst.getType()->isScalableTy())
return true;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
index 49276c9416234..8e43e044b7ee5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
@@ -2,39 +2,6 @@
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
-
-declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64)
-
-; FALLBACK_WITH_REPORT_ERR: <unknown>:0:0: unable to translate instruction: call
-; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
-define <vscale x 1 x i8> @scalable_arg(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call
-; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_inst
-define <vscale x 1 x i8> @scalable_inst(i64 %0) nounwind {
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> undef,
- i64 %0)
-
- ret <vscale x 1 x i8> %a
-}
-
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: alloca:
; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_alloca
define void @scalable_alloca() #1 {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
new file mode 100644
index 0000000000000..f4b46b6b5857a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
@@ -0,0 +1,764 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfhmin,+zvfh \
+; RUN: -verify-machineinstrs -target-abi=ilp32d -global-isel | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \
+; RUN: -verify-machineinstrs -target-abi=lp64d -global-isel | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
+ <vscale x 2 x half> undef,
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
+ <vscale x 4 x half> undef,
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
+ <vscale x 8 x half> undef,
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
+ <vscale x 16 x half> undef,
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
+ <vscale x 32 x half> undef,
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 32 x half> %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
+ <vscale x 2 x float> undef,
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
+ <vscale x 4 x float> undef,
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
+ <vscale x 8 x float> undef,
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
+ <vscale x 16 x float> undef,
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x float> %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
+ <vscale x 2 x double> undef,
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
+ <vscale x 4 x double> undef,
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
+ <vscale x 8 x double> undef,
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: fsrm a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 8 x double> %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
From a29e66d895b4805558c9b36a9880f07ab54acfb6 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Wed, 3 Sep 2025 16:44:30 +0800
Subject: [PATCH 2/5] Address comment
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 7 ++-----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 13 ++++---------
2 files changed, 6 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 2d175c7bf7899..f538ae70c1b8f 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -713,12 +713,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
- Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+ Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
- const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
- RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID);
-
- if (II) {
+ if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
return true;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e2ea0e77ca23a..86ecc285545ec 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -24880,15 +24881,9 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
Op == Instruction::Freeze || Op == Instruction::Store)
return false;
- if (Op == Instruction::Call) {
- const CallInst &CI = cast<CallInst>(Inst);
- const Function *F = CI.getCalledFunction();
- Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
-
- const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
- RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(ID);
- // Mark RVV intrinsic is supported.
- if (II)
+ if (auto *II = dyn_cast<IntrinsicInst>(&Inst)) {
+ // Mark RVV intrinsic as supported.
+ if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(II->getIntrinsicID()))
return false;
}
From 5cf5bc98574e94242b130996ca8911788f9e6b5f Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Wed, 3 Sep 2025 16:56:43 +0800
Subject: [PATCH 3/5] format
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 2 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f538ae70c1b8f..b7d4dedd615e7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -713,7 +713,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
- Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+ Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
return true;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 86ecc285545ec..2aea09891d51e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -37,8 +37,8 @@
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCInstBuilder.h"
From 4cb432163a0b14c10e7bf972ed7a57d2dffc4b9d Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Mon, 8 Sep 2025 14:32:00 +0800
Subject: [PATCH 4/5] Address comment
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index b7d4dedd615e7..64e71e2a968a7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -715,9 +715,8 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
- if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
+ if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID))
return true;
- }
switch (IntrinsicID) {
default:
From f6b3421c7aa924c9c44c71605895284f2f6bba4f Mon Sep 17 00:00:00 2001
From: Jianjian GUAN <jacquesguan at me.com>
Date: Mon, 8 Sep 2025 11:43:24 +0800
Subject: [PATCH 5/5] [RISCV][GISel] Support select vx, vf form rvv intrinsics
For the vx form, we legalize the intrinsic by widening its scalar operand to XLen; for the vf form, we select the FPR register bank for the scalar operand.
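A sketch of the vx case in the same iXLen style (the real vx-form tests live in the new vadd.ll below; the .i8 mangling follows the usual convention and this exact function is illustrative):

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
entry:
  ; The i8 scalar is narrower than XLen, so legalizeIntrinsic rewrites this
  ; operand through G_ANYEXT to sXLen before instruction selection picks
  ; vadd.vx. (i64 scalars on riscv32 are left as a TODO in the patch.)
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}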
---
llvm/include/llvm/IR/IntrinsicsRISCV.td | 163 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 23 +-
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 28 +
llvm/lib/Target/RISCV/RISCVISelLowering.h | 1 +
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 2 +-
.../test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll | 2443 +++++++++++++++++
.../CodeGen/RISCV/GlobalISel/rvv/vfadd.ll | 750 +++++
7 files changed, 3334 insertions(+), 76 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 878f7b3194830..4d0debd399e5f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -126,6 +126,7 @@ class RISCVVIntrinsic {
Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
bits<4> ScalarOperand = NoScalarOperand;
bits<5> VLOperand = NoVLOperand;
+ bit IsFPIntrinsic = 0;
}
let TargetPrefix = "riscv" in {
@@ -1442,14 +1443,15 @@ let TargetPrefix = "riscv" in {
defm vwmaccus : RISCVTernaryWide;
defm vwmaccsu : RISCVTernaryWide;
- defm vfadd : RISCVBinaryAAXRoundingMode;
- defm vfsub : RISCVBinaryAAXRoundingMode;
- defm vfrsub : RISCVBinaryAAXRoundingMode;
-
- defm vfwadd : RISCVBinaryABXRoundingMode;
- defm vfwsub : RISCVBinaryABXRoundingMode;
- defm vfwadd_w : RISCVBinaryAAXRoundingMode;
- defm vfwsub_w : RISCVBinaryAAXRoundingMode;
+ let IsFPIntrinsic = 1 in {
+ defm vfadd : RISCVBinaryAAXRoundingMode;
+ defm vfsub : RISCVBinaryAAXRoundingMode;
+ defm vfrsub : RISCVBinaryAAXRoundingMode;
+ defm vfwadd : RISCVBinaryABXRoundingMode;
+ defm vfwsub : RISCVBinaryABXRoundingMode;
+ defm vfwadd_w : RISCVBinaryAAXRoundingMode;
+ defm vfwsub_w : RISCVBinaryAAXRoundingMode;
+ }
defm vsaddu : RISCVSaturatingBinaryAAX;
defm vsadd : RISCVSaturatingBinaryAAX;
@@ -1484,6 +1486,7 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 2;
+ let IsFPIntrinsic = 1;
}
def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
@@ -1506,51 +1509,57 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 2;
+ let IsFPIntrinsic = 1;
}
- defm vfmul : RISCVBinaryAAXRoundingMode;
- defm vfdiv : RISCVBinaryAAXRoundingMode;
- defm vfrdiv : RISCVBinaryAAXRoundingMode;
+ let IsFPIntrinsic = 1 in {
+ defm vfmul : RISCVBinaryAAXRoundingMode;
+ defm vfdiv : RISCVBinaryAAXRoundingMode;
+ defm vfrdiv : RISCVBinaryAAXRoundingMode;
- defm vfwmul : RISCVBinaryABXRoundingMode;
+ defm vfwmul : RISCVBinaryABXRoundingMode;
- defm vfmacc : RISCVTernaryAAXARoundingMode;
- defm vfnmacc : RISCVTernaryAAXARoundingMode;
- defm vfmsac : RISCVTernaryAAXARoundingMode;
- defm vfnmsac : RISCVTernaryAAXARoundingMode;
- defm vfmadd : RISCVTernaryAAXARoundingMode;
- defm vfnmadd : RISCVTernaryAAXARoundingMode;
- defm vfmsub : RISCVTernaryAAXARoundingMode;
- defm vfnmsub : RISCVTernaryAAXARoundingMode;
+ defm vfmacc : RISCVTernaryAAXARoundingMode;
+ defm vfnmacc : RISCVTernaryAAXARoundingMode;
+ defm vfmsac : RISCVTernaryAAXARoundingMode;
+ defm vfnmsac : RISCVTernaryAAXARoundingMode;
+ defm vfmadd : RISCVTernaryAAXARoundingMode;
+ defm vfnmadd : RISCVTernaryAAXARoundingMode;
+ defm vfmsub : RISCVTernaryAAXARoundingMode;
+ defm vfnmsub : RISCVTernaryAAXARoundingMode;
- defm vfwmacc : RISCVTernaryWideRoundingMode;
- defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
- defm vfwnmacc : RISCVTernaryWideRoundingMode;
- defm vfwmsac : RISCVTernaryWideRoundingMode;
- defm vfwnmsac : RISCVTernaryWideRoundingMode;
+ defm vfwmacc : RISCVTernaryWideRoundingMode;
+ defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
+ defm vfwnmacc : RISCVTernaryWideRoundingMode;
+ defm vfwmsac : RISCVTernaryWideRoundingMode;
+ defm vfwnmsac : RISCVTernaryWideRoundingMode;
- defm vfsqrt : RISCVUnaryAARoundingMode;
- defm vfrsqrt7 : RISCVUnaryAA;
- defm vfrec7 : RISCVUnaryAARoundingMode;
+ defm vfsqrt : RISCVUnaryAARoundingMode;
+ defm vfrsqrt7 : RISCVUnaryAA;
+ defm vfrec7 : RISCVUnaryAARoundingMode;
- defm vfmin : RISCVBinaryAAX;
- defm vfmax : RISCVBinaryAAX;
+ defm vfmin : RISCVBinaryAAX;
+ defm vfmax : RISCVBinaryAAX;
- defm vfsgnj : RISCVBinaryAAX;
- defm vfsgnjn : RISCVBinaryAAX;
- defm vfsgnjx : RISCVBinaryAAX;
+ defm vfsgnj : RISCVBinaryAAX;
+ defm vfsgnjn : RISCVBinaryAAX;
+ defm vfsgnjx : RISCVBinaryAAX;
- defm vfclass : RISCVClassify;
+ defm vfclass : RISCVClassify;
- defm vfmerge : RISCVBinaryWithV0;
+ defm vfmerge : RISCVBinaryWithV0;
+ }
defm vslideup : RVVSlide;
defm vslidedown : RVVSlide;
defm vslide1up : RISCVBinaryAAX;
defm vslide1down : RISCVBinaryAAX;
- defm vfslide1up : RISCVBinaryAAX;
- defm vfslide1down : RISCVBinaryAAX;
+
+ let IsFPIntrinsic = 1 in {
+ defm vfslide1up : RISCVBinaryAAX;
+ defm vfslide1down : RISCVBinaryAAX;
+ }
defm vrgather_vv : RISCVRGatherVV;
defm vrgather_vx : RISCVRGatherVX;
@@ -1571,12 +1580,14 @@ let TargetPrefix = "riscv" in {
defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;
- defm vmfeq : RISCVCompare;
- defm vmfne : RISCVCompare;
- defm vmflt : RISCVCompare;
- defm vmfle : RISCVCompare;
- defm vmfgt : RISCVCompare;
- defm vmfge : RISCVCompare;
+ let IsFPIntrinsic = 1 in {
+ defm vmfeq : RISCVCompare;
+ defm vmfne : RISCVCompare;
+ defm vmflt : RISCVCompare;
+ defm vmfle : RISCVCompare;
+ defm vmfgt : RISCVCompare;
+ defm vmfge : RISCVCompare;
+ }
defm vredsum : RISCVReduction;
defm vredand : RISCVReduction;
@@ -1590,13 +1601,15 @@ let TargetPrefix = "riscv" in {
defm vwredsumu : RISCVReduction;
defm vwredsum : RISCVReduction;
- defm vfredosum : RISCVReductionRoundingMode;
- defm vfredusum : RISCVReductionRoundingMode;
- defm vfredmin : RISCVReduction;
- defm vfredmax : RISCVReduction;
+ let IsFPIntrinsic = 1 in {
+ defm vfredosum : RISCVReductionRoundingMode;
+ defm vfredusum : RISCVReductionRoundingMode;
+ defm vfredmin : RISCVReduction;
+ defm vfredmax : RISCVReduction;
- defm vfwredusum : RISCVReductionRoundingMode;
- defm vfwredosum : RISCVReductionRoundingMode;
+ defm vfwredusum : RISCVReductionRoundingMode;
+ defm vfwredosum : RISCVReductionRoundingMode;
+ }
def int_riscv_vmand: RISCVBinaryAAAUnMasked;
def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
@@ -1615,31 +1628,33 @@ let TargetPrefix = "riscv" in {
defm vmsof : RISCVMaskedUnaryMOut;
defm vmsif : RISCVMaskedUnaryMOut;
- defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
- defm vfcvt_x_f_v : RISCVConversionRoundingMode;
- defm vfcvt_rtz_xu_f_v : RISCVConversion;
- defm vfcvt_rtz_x_f_v : RISCVConversion;
- defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
- defm vfcvt_f_x_v : RISCVConversionRoundingMode;
-
- defm vfwcvt_f_xu_v : RISCVConversion;
- defm vfwcvt_f_x_v : RISCVConversion;
- defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
- defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
- defm vfwcvt_rtz_xu_f_v : RISCVConversion;
- defm vfwcvt_rtz_x_f_v : RISCVConversion;
- defm vfwcvt_f_f_v : RISCVConversion;
- defm vfwcvtbf16_f_f_v : RISCVConversion;
-
- defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
- defm vfncvt_f_x_w : RISCVConversionRoundingMode;
- defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
- defm vfncvt_x_f_w : RISCVConversionRoundingMode;
- defm vfncvt_rtz_xu_f_w : RISCVConversion;
- defm vfncvt_rtz_x_f_w : RISCVConversion;
- defm vfncvt_f_f_w : RISCVConversionRoundingMode;
- defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
- defm vfncvt_rod_f_f_w : RISCVConversion;
+ let IsFPIntrinsic = 1 in {
+ defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
+ defm vfcvt_x_f_v : RISCVConversionRoundingMode;
+ defm vfcvt_rtz_xu_f_v : RISCVConversion;
+ defm vfcvt_rtz_x_f_v : RISCVConversion;
+ defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
+ defm vfcvt_f_x_v : RISCVConversionRoundingMode;
+
+ defm vfwcvt_f_xu_v : RISCVConversion;
+ defm vfwcvt_f_x_v : RISCVConversion;
+ defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
+ defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
+ defm vfwcvt_rtz_xu_f_v : RISCVConversion;
+ defm vfwcvt_rtz_x_f_v : RISCVConversion;
+ defm vfwcvt_f_f_v : RISCVConversion;
+ defm vfwcvtbf16_f_f_v : RISCVConversion;
+
+ defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
+ defm vfncvt_f_x_w : RISCVConversionRoundingMode;
+ defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
+ defm vfncvt_x_f_w : RISCVConversionRoundingMode;
+ defm vfncvt_rtz_xu_f_w : RISCVConversion;
+ defm vfncvt_rtz_x_f_w : RISCVConversion;
+ defm vfncvt_f_f_w : RISCVConversionRoundingMode;
+ defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
+ defm vfncvt_rod_f_f_w : RISCVConversion;
+ }
// Output: (vector)
// Input: (passthru, mask type input, vl)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 64e71e2a968a7..9c0b1708aabb8 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -715,8 +715,29 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
- if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID))
+ if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
+ if (II->hasScalarOperand() && !II->IsFPIntrinsic) {
+ MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+ MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
+
+ auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg();
+ // Legalize integer vx form intrinsic.
+ if (MRI.getType(OldScalar).isScalar()) {
+ if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) {
+ auto NewScalar = MRI.createGenericVirtualRegister(sXLen);
+ MIRBuilder.buildExtOrTrunc(TargetOpcode::G_ANYEXT, NewScalar,
+ OldScalar);
+ Helper.Observer.changingInstr(MI);
+ MI.getOperand(II->ScalarOperand + 2).setReg(NewScalar);
+ Helper.Observer.changedInstr(MI);
+ } else if (MRI.getType(OldScalar).getSizeInBits() >
+ sXLen.getSizeInBits()) {
+ // TODO: i64 in riscv32.
+ }
+ }
+ }
return true;
+ }
switch (IntrinsicID) {
default:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index a082b18867666..16d6c9a5652d3 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -500,6 +500,34 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = GPRValueMapping;
break;
}
+ case TargetOpcode::G_INTRINSIC: {
+ Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+
+ if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
+ unsigned ScalarIdx = -1;
+ if (II->hasScalarOperand()) {
+ ScalarIdx = II->ScalarOperand + 2;
+ }
+ for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
+ auto &MO = MI.getOperand(Idx);
+ if (!MO.isReg() || !MO.getReg())
+ continue;
+ LLT Ty = MRI.getType(MO.getReg());
+ if (!Ty.isValid())
+ continue;
+
+ if (Ty.isVector())
+ OpdsMapping[Idx] =
+ getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue());
+      // Choose the right FPR for the scalar operand of RVV intrinsics.
+ else if (II->IsFPIntrinsic && ScalarIdx == Idx)
+ OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits());
+ else
+ OpdsMapping[Idx] = GPRValueMapping;
+ }
+ }
+ break;
+ }
default:
// By default map all scalars to GPR.
for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 4581c11356aff..3f81ed74c12ed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -642,6 +642,7 @@ struct RISCVVIntrinsicInfo {
unsigned IntrinsicID;
uint8_t ScalarOperand;
uint8_t VLOperand;
+ bool IsFPIntrinsic;
bool hasScalarOperand() const {
// 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.
return ScalarOperand != 0xF;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 03e6f43a38945..ecde628fc7e21 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -575,7 +575,7 @@ def RISCVVInversePseudosTable : GenericTable {
def RISCVVIntrinsicsTable : GenericTable {
let FilterClass = "RISCVVIntrinsic";
let CppTypeName = "RISCVVIntrinsicInfo";
- let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"];
+ let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand", "IsFPIntrinsic"];
let PrimaryKey = ["IntrinsicID"];
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
new file mode 100644
index 0000000000000..56616c286b6d8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll
@@ -0,0 +1,2443 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -global-isel \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> undef,
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> undef,
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> undef,
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> undef,
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> undef,
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> undef,
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ iXLen, iXLen);
+
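+; Note: at LMUL=8 the three vector operands of the masked intrinsic do not all
+; fit in the v8-v23 vector argument registers, so the third operand is passed
+; indirectly and reloaded with vl8r.v here (and vl8re16/32/64.v in the wider
+; element-type tests below).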
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> undef,
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> undef,
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> undef,
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> undef,
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> undef,
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> undef,
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> undef,
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> undef,
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> undef,
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> undef,
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> undef,
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> undef,
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> undef,
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
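+; The vadd.vx tests below exercise the vector-scalar form, where the second
+; source operand is a scalar GPR (a0) splatted across the vector.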
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ iXLen %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
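+; The vadd.vi tests below exercise the vector-immediate form. They reuse the
+; scalar-operand intrinsic declarations above, passing the constants 9 and -9,
+; which fit in the 5-bit signed immediate field of vadd.vi.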
+define <vscale x 1 x i8> @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8(
+ <vscale x 1 x i8> undef,
+ <vscale x 1 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8(
+ <vscale x 2 x i8> undef,
+ <vscale x 2 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 -9,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8(
+ <vscale x 4 x i8> undef,
+ <vscale x 4 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 -9,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8(
+ <vscale x 8 x i8> undef,
+ <vscale x 8 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 -9,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8(
+ <vscale x 16 x i8> undef,
+ <vscale x 16 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 -9,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8(
+ <vscale x 32 x i8> undef,
+ <vscale x 32 x i8> %0,
+ i8 9,
+ iXLen %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 -9,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, -9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
+ <vscale x 64 x i8> undef,
+ <vscale x 64 x i8> %0,
+ i8 -9,
+ iXLen %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 -9,
+ <vscale x 64 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16(
+ <vscale x 1 x i16> undef,
+ <vscale x 1 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16(
+ <vscale x 2 x i16> undef,
+ <vscale x 2 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 -9,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
+ <vscale x 4 x i16> undef,
+ <vscale x 4 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 -9,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16(
+ <vscale x 8 x i16> undef,
+ <vscale x 8 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 -9,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16(
+ <vscale x 16 x i16> undef,
+ <vscale x 16 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 -9,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16(
+ <vscale x 32 x i16> undef,
+ <vscale x 32 x i16> %0,
+ i16 9,
+ iXLen %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 -9,
+ <vscale x 32 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32(
+ <vscale x 1 x i32> undef,
+ <vscale x 1 x i32> %0,
+ i32 9,
+ iXLen %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 -9,
+ <vscale x 1 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32(
+ <vscale x 2 x i32> undef,
+ <vscale x 2 x i32> %0,
+ i32 9,
+ iXLen %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 -9,
+ <vscale x 2 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32(
+ <vscale x 4 x i32> undef,
+ <vscale x 4 x i32> %0,
+ i32 9,
+ iXLen %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 -9,
+ <vscale x 4 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32(
+ <vscale x 8 x i32> undef,
+ <vscale x 8 x i32> %0,
+ i32 9,
+ iXLen %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 -9,
+ <vscale x 8 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vadd.vi v8, v8, 9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32(
+ <vscale x 16 x i32> undef,
+ <vscale x 16 x i32> %0,
+ i32 9,
+ iXLen %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 -9,
+ <vscale x 16 x i1> %2,
+ iXLen %3, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
index f4b46b6b5857a..c85a697307bd6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll
@@ -762,3 +762,753 @@ entry:
ret <vscale x 8 x double> %a
}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
+ <vscale x 1 x half> undef,
+ <vscale x 1 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
+ <vscale x 2 x half> undef,
+ <vscale x 2 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
+ <vscale x 4 x half> undef,
+ <vscale x 4 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
+ <vscale x 8 x half> undef,
+ <vscale x 8 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
+ <vscale x 16 x half> undef,
+ <vscale x 16 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ half,
+ iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
+ <vscale x 32 x half> undef,
+ <vscale x 32 x half> %0,
+ half %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ half,
+ <vscale x 32 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ half %2,
+ <vscale x 32 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ float,
+ iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
+ <vscale x 1 x float> undef,
+ <vscale x 1 x float> %0,
+ float %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ float,
+ iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
+ <vscale x 2 x float> undef,
+ <vscale x 2 x float> %0,
+ float %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ float,
+ iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
+ <vscale x 4 x float> undef,
+ <vscale x 4 x float> %0,
+ float %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ float,
+ iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
+ <vscale x 8 x float> undef,
+ <vscale x 8 x float> %0,
+ float %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ float,
+ iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
+ <vscale x 16 x float> undef,
+ <vscale x 16 x float> %0,
+ float %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ float,
+ <vscale x 16 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ float %2,
+ <vscale x 16 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ double,
+ iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
+ <vscale x 1 x double> undef,
+ <vscale x 1 x double> %0,
+ double %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ double,
+ iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
+ <vscale x 2 x double> undef,
+ <vscale x 2 x double> %0,
+ double %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ double,
+ iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
+ <vscale x 4 x double> undef,
+ <vscale x 4 x double> %0,
+ double %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ double,
+ iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
+ <vscale x 8 x double> undef,
+ <vscale x 8 x double> %0,
+ double %1,
+ iXLen 0, iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ double,
+ <vscale x 8 x i1>,
+ iXLen, iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ double %2,
+ <vscale x 8 x i1> %3,
+ iXLen 0, iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}